Build JSON documentation for ansible-core CLI programs.
def build_json(output_file: pathlib.Path) -> None:
    """Build JSON documentation for ansible-core CLI programs."""
    warnings.warn("JSON output is intended for debugging purposes only. The data model may change in future releases without notice.")

    output_file.parent.mkdir(exist_ok=True, parents=True)
    output_file.write_text(json.dumps(collect_programs(), indent=4))
Generate RST pages using the provided template.
def generate_rst(template_file: pathlib.Path) -> dict[str, str]:
    """Generate RST pages using the provided template."""
    results: dict[str, str] = {}

    for cli_name, template_vars in collect_programs().items():
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_file.parent))
        template = env.get_template(template_file.name)
        results[cli_name] = template.render(template_vars)

    return results
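A quick usage sketch, assuming the function above is importable; the template path below is illustrative, not the real docsite template name:

import pathlib

pages = generate_rst(pathlib.Path('docs/templates/cli_rst/man.j2'))  # hypothetical template path
for cli_name, rst_text in sorted(pages.items()):
    print(f'{cli_name}: rendered {len(rst_text)} characters of RST')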
Return information about CLI programs.
def collect_programs() -> dict[str, dict[str, t.Any]]:
    """Return information about CLI programs."""
    programs: list[tuple[str, dict[str, t.Any]]] = []
    cli_bin_name_list: list[str] = []

    for source_file in (SOURCE_DIR / 'lib/ansible/cli').glob('*.py'):
        if source_file.name != '__init__.py':
            programs.append(generate_options_docs(source_file, cli_bin_name_list))

    return dict(programs)
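For illustration, a hedged sketch of inspecting the collected data; this only works when run from an ansible-core source tree with its dependencies installed, since each CLI module gets imported:

programs = collect_programs()
print(sorted(programs))              # e.g. ['ansible', 'ansible-config', 'ansible-console', ...]
print(programs['ansible']['usage'])  # the argparse usage string collected for the ad-hoc CLI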
Generate doc structure from CLI module options.
def generate_options_docs(source_file: pathlib.Path, cli_bin_name_list: list[str]) -> tuple[str, dict[str, t.Any]]: """Generate doc structure from CLI module options.""" import ansible.release if str(source_file).endswith('/lib/ansible/cli/adhoc.py'): cli_name = 'ansible' cli_class_name = 'AdHocCLI' cli_module_fqn = 'ansible.cli.adhoc' else: cli_module_name = source_file.with_suffix('').name cli_name = f'ansible-{cli_module_name}' cli_class_name = f'{cli_module_name.capitalize()}CLI' cli_module_fqn = f'ansible.cli.{cli_module_name}' cli_bin_name_list.append(cli_name) cli_module = importlib.import_module(cli_module_fqn) cli_class: type[CLI] = getattr(cli_module, cli_class_name) cli = cli_class([cli_name]) cli.init_parser() parser: argparse.ArgumentParser = cli.parser long_desc = cli.__doc__ arguments: dict[str, str] | None = getattr(cli, 'ARGUMENTS', None) action_docs = get_action_docs(parser) option_names: tuple[str, ...] = tuple(itertools.chain.from_iterable(opt.options for opt in action_docs)) actions: dict[str, dict[str, t.Any]] = {} content_depth = populate_subparser_actions(parser, option_names, actions) docs = dict( version=ansible.release.__version__, source=str(source_file.relative_to(SOURCE_DIR)), cli_name=cli_name, usage=parser.format_usage(), short_desc=parser.description, long_desc=trim_docstring(long_desc), actions=actions, options=[item.__dict__ for item in action_docs], arguments=arguments, option_names=option_names, cli_bin_name_list=cli_bin_name_list, content_depth=content_depth, inventory='-i' in option_names, library='-M' in option_names, ) return cli_name, docs
Generate doc structure from CLI module subparser options.
def populate_subparser_actions(parser: argparse.ArgumentParser, shared_option_names: tuple[str, ...], actions: dict[str, dict[str, t.Any]]) -> int: """Generate doc structure from CLI module subparser options.""" try: # noinspection PyProtectedMember subparsers: dict[str, argparse.ArgumentParser] = parser._subparsers._group_actions[0].choices # type: ignore except AttributeError: subparsers = {} depth = 0 for subparser_action, subparser in subparsers.items(): subparser_option_names: set[str] = set() subparser_action_docs: set[ActionDoc] = set() subparser_actions: dict[str, dict[str, t.Any]] = {} for action_doc in get_action_docs(subparser): for option_alias in action_doc.options: if option_alias in shared_option_names: continue subparser_option_names.add(option_alias) subparser_action_docs.add(action_doc) depth = populate_subparser_actions(subparser, shared_option_names, subparser_actions) actions[subparser_action] = dict( option_names=list(subparser_option_names), options=[item.__dict__ for item in subparser_action_docs], actions=subparser_actions, name=subparser_action, desc=trim_docstring(subparser.get_default("func").__doc__), ) return depth + 1
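A minimal sketch of the private argparse attribute the function above relies on to enumerate subcommands (private APIs like _subparsers can change between Python versions, hence the noinspection comments):

import argparse

parser = argparse.ArgumentParser(prog='example')
sub = parser.add_subparsers(metavar='ACTION')
init_parser = sub.add_parser('init', help='initialize things')

# mirrors the parser._subparsers._group_actions[0].choices lookup used above
choices = parser._subparsers._group_actions[0].choices
print(list(choices))  # -> ['init']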
Get action documentation from the given argument parser.
def get_action_docs(parser: argparse.ArgumentParser) -> list[ActionDoc]:
    """Get action documentation from the given argument parser."""
    action_docs = []

    # noinspection PyProtectedMember
    for action in parser._actions:
        if action.help == argparse.SUPPRESS:
            continue

        # noinspection PyProtectedMember, PyUnresolvedReferences
        args = action.dest.upper() if isinstance(action, argparse._StoreAction) else None

        if args or action.option_strings:
            action_docs.append(ActionDoc(
                desc=action.help,
                options=tuple(action.option_strings),
                arg=args,
            ))

    return action_docs
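A small self-contained example of what get_action_docs() extracts from a parser, assuming the ActionDoc dataclass and the function above are in scope:

import argparse

parser = argparse.ArgumentParser(prog='example')
parser.add_argument('-i', '--inventory', help='specify inventory host path')
parser.add_argument('target', help='target host pattern')

for doc in get_action_docs(parser):
    print(doc.options, doc.arg, doc.desc)
# e.g. ('-h', '--help') None show this help message and exit
#      ('-i', '--inventory') INVENTORY specify inventory host path
#      () TARGET target host pattern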
Trim and return the given docstring using the implementation from https://peps.python.org/pep-0257/#handling-docstring-indentation.
def trim_docstring(docstring: str | None) -> str:
    """Trim and return the given docstring using the implementation from https://peps.python.org/pep-0257/#handling-docstring-indentation."""
    if not docstring:
        return ''  # pragma: nocover

    # Convert tabs to spaces (following the normal Python rules) and split into a list of lines
    lines = docstring.expandtabs().splitlines()

    # Determine minimum indentation (first line doesn't count)
    indent = sys.maxsize

    for line in lines[1:]:
        stripped = line.lstrip()

        if stripped:
            indent = min(indent, len(line) - len(stripped))

    # Remove indentation (first line is special)
    trimmed = [lines[0].strip()]

    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())

    # Strip off trailing and leading blank lines
    while trimmed and not trimmed[-1]:
        trimmed.pop()

    while trimmed and not trimmed[0]:
        trimmed.pop(0)

    # Return a single string
    return '\n'.join(trimmed)
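A short example of the PEP 257 trimming behavior, assuming trim_docstring() above is in scope:

example = """Summary line.

    Indented details that should be
    dedented to the left margin.
"""
print(trim_docstring(example))
# Summary line.
#
# Indented details that should be
# dedented to the left margin.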
get the current canonical name for a given deprecated config key
def get_real_name(key: str) -> str:
    """get the current canonical name for a given deprecated config key"""
    return CONFIG_ALIASES.get(key.upper().strip(), key.upper().strip())
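Usage sketch, assuming CONFIG_ALIASES contains a hypothetical entry mapping 'USE_CHROME_HEADLESS' to 'CHROME_HEADLESS':

print(get_real_name(' use_chrome_headless '))  # -> 'CHROME_HEADLESS' (deprecated alias mapped to its canonical name)
print(get_real_name('timeout'))                # -> 'TIMEOUT' (unknown keys are just upper-cased and stripped)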
returns a dictionary containing the ArchiveBox GitHub release info for the recommended upgrade version and the currently installed version
def get_versions_available_on_github(config): """ returns a dictionary containing the ArchiveBox GitHub release info for the recommended upgrade version and the currently installed version """ # we only want to perform the (relatively expensive) check for new versions # when its most relevant, e.g. when the user runs a long-running command subcommand_run_by_user = sys.argv[3] if len(sys.argv) > 3 else 'help' long_running_commands = ('add', 'schedule', 'update', 'status', 'server') if subcommand_run_by_user not in long_running_commands: return None github_releases_api = "https://api.github.com/repos/ArchiveBox/ArchiveBox/releases" response = requests.get(github_releases_api) if response.status_code != 200: stderr(f'[!] Warning: GitHub API call to check for new ArchiveBox version failed! (status={response.status_code})', color='lightyellow', config=config) return None all_releases = response.json() installed_version = parse_version_string(config['VERSION']) # find current version or nearest older version (to link to) current_version = None for idx, release in enumerate(all_releases): release_version = parse_version_string(release['tag_name']) if release_version <= installed_version: current_version = release break current_version = current_version or all_releases[-1] # recommended version is whatever comes after current_version in the release list # (perhaps too conservative to only recommend upgrading one version at a time, but it's safest) try: recommended_version = all_releases[idx+1] except IndexError: recommended_version = None return {'recommended_version': recommended_version, 'current_version': current_version}
parse bool, int, and str key=value pairs from env
def load_config_val(key: str, default: ConfigDefaultValue=None, type: Optional[Type]=None, aliases: Optional[Tuple[str, ...]]=None, config: Optional[ConfigDict]=None, env_vars: Optional[os._Environ]=None, config_file_vars: Optional[Dict[str, str]]=None) -> ConfigValue: """parse bool, int, and str key=value pairs from env""" assert isinstance(config, dict) is_read_only = type is None if is_read_only: if callable(default): return default(config) return default # get value from environment variables or config files config_keys_to_check = (key, *(aliases or ())) val = None for key in config_keys_to_check: if env_vars: val = env_vars.get(key) if val: break if config_file_vars: val = config_file_vars.get(key) if val: break is_unset = val is None if is_unset: if callable(default): return default(config) return default # calculate value based on expected type BOOL_TRUEIES = ('true', 'yes', '1') BOOL_FALSEIES = ('false', 'no', '0') if type is bool: if val.lower() in BOOL_TRUEIES: return True elif val.lower() in BOOL_FALSEIES: return False else: raise ValueError(f'Invalid configuration option {key}={val} (expected a boolean: True/False)') elif type is str: if val.lower() in (*BOOL_TRUEIES, *BOOL_FALSEIES): raise ValueError(f'Invalid configuration option {key}={val} (expected a string, but value looks like a boolean)') return val.strip() elif type is int: if not val.strip().isdigit(): raise ValueError(f'Invalid configuration option {key}={val} (expected an integer)') return int(val.strip()) elif type is list or type is dict: return json.loads(val) raise Exception('Config values can only be str, bool, int, or json')
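A hedged example of the type coercion, assuming load_config_val() above is in scope (a plain dict stands in for os.environ here):

env = {'TIMEOUT': '60', 'SAVE_WGET': 'false'}
timeout = load_config_val('TIMEOUT', default=60, type=int, config={}, env_vars=env)         # -> 60 (int)
save_wget = load_config_val('SAVE_WGET', default=True, type=bool, config={}, env_vars=env)  # -> False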
load the ini-formatted config file from OUTPUT_DIR/Archivebox.conf
def load_config_file(out_dir: str=None) -> Optional[Dict[str, str]]: """load the ini-formatted config file from OUTPUT_DIR/Archivebox.conf""" out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve() config_path = Path(out_dir) / CONFIG_FILENAME if config_path.exists(): config_file = ConfigParser() config_file.optionxform = str config_file.read(config_path) # flatten into one namespace config_file_vars = { key.upper(): val for section, options in config_file.items() for key, val in options.items() } # print('[i] Loaded config file', os.path.abspath(config_path)) # print(config_file_vars) return config_file_vars return None
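A sketch of the flattening behavior with a hypothetical config file (section names and values below are illustrative only):

# Given OUTPUT_DIR/ArchiveBox.conf containing:
#
#   [SERVER_CONFIG]
#   SECRET_KEY = abc123
#
#   [ARCHIVE_METHOD_OPTIONS]
#   CHECK_SSL_VALIDITY = False
#
# load_config_file() merges every section into one upper-cased namespace:
#   {'SECRET_KEY': 'abc123', 'CHECK_SSL_VALIDITY': 'False'}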
write the ini-formatted config file to OUTPUT_DIR/ArchiveBox.conf
def write_config_file(config: Dict[str, str], out_dir: str=None) -> ConfigDict: """load the ini-formatted config file from OUTPUT_DIR/Archivebox.conf""" from .system import atomic_write CONFIG_HEADER = ( """# This is the config file for your ArchiveBox collection. # # You can add options here manually in INI format, or automatically by running: # archivebox config --set KEY=VALUE # # If you modify this file manually, make sure to update your archive after by running: # archivebox init # # A list of all possible config with documentation and examples can be found here: # https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration """) out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve() config_path = Path(out_dir) / CONFIG_FILENAME if not config_path.exists(): atomic_write(config_path, CONFIG_HEADER) config_file = ConfigParser() config_file.optionxform = str config_file.read(config_path) with open(config_path, 'r', encoding='utf-8') as old: atomic_write(f'{config_path}.bak', old.read()) find_section = lambda key: [name for name, opts in CONFIG_SCHEMA.items() if key in opts][0] # Set up sections in empty config file for key, val in config.items(): section = find_section(key) if section in config_file: existing_config = dict(config_file[section]) else: existing_config = {} config_file[section] = {**existing_config, key: val} # always make sure there's a SECRET_KEY defined for Django existing_secret_key = None if 'SERVER_CONFIG' in config_file and 'SECRET_KEY' in config_file['SERVER_CONFIG']: existing_secret_key = config_file['SERVER_CONFIG']['SECRET_KEY'] if (not existing_secret_key) or ('not a valid secret' in existing_secret_key): from django.utils.crypto import get_random_string chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' random_secret_key = get_random_string(50, chars) if 'SERVER_CONFIG' in config_file: config_file['SERVER_CONFIG']['SECRET_KEY'] = random_secret_key else: config_file['SERVER_CONFIG'] = {'SECRET_KEY': random_secret_key} with open(config_path, 'w+', encoding='utf-8') as new: config_file.write(new) try: # validate the config by attempting to re-parse it CONFIG = load_all_config() except BaseException: # lgtm [py/catch-base-exception] # something went horribly wrong, rever to the previous version with open(f'{config_path}.bak', 'r', encoding='utf-8') as old: atomic_write(config_path, old.read()) raise if Path(f'{config_path}.bak').exists(): os.remove(f'{config_path}.bak') return { key.upper(): CONFIG.get(key.upper()) for key in config.keys() }
parses a version tag string formatted like 'vx.x.x' into (major, minor, patch) ints
def parse_version_string(version: str) -> Tuple[int, int, int]:
    """parses a version tag string formatted like 'vx.x.x' into (major, minor, patch) ints"""
    base = version.split('+')[0].split('v')[-1]  # remove 'v' prefix and '+editable' suffix
    return tuple(int(part) for part in base.split('.'))[:3]
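For example, assuming parse_version_string() above is in scope:

print(parse_version_string('v0.7.2'))          # -> (0, 7, 2)
print(parse_version_string('0.7.2+editable'))  # -> (0, 7, 2)  (both the 'v' prefix and a '+' suffix are tolerated)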
check the presence and return valid version line of a specified binary
def bin_version(binary: Optional[str]) -> Optional[str]:
    """check the presence and return valid version line of a specified binary"""
    abspath = bin_path(binary)
    if not binary or not abspath:
        return None

    try:
        bin_env = os.environ | {'LANG': 'C'}
        version_str = run([abspath, "--version"], stdout=PIPE, env=bin_env).stdout.strip().decode()
        if not version_str:
            version_str = run([abspath, "--version"], stdout=PIPE).stdout.strip().decode()
        # take first 3 columns of first line of version info
        return ' '.join(version_str.split('\n')[0].strip().split()[:3])
    except OSError:
        pass
        # stderr(f'[X] Unable to find working version of dependency: {binary}', color='red')
        # stderr(' Make sure it\'s installed, then confirm it\'s working by running:')
        # stderr(f' {binary} --version')
        # stderr()
        # stderr(' If you don\'t want to install it, you can disable it via config. See here for more info:')
        # stderr(' https://github.com/ArchiveBox/ArchiveBox/wiki/Install')
    return None
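Usage sketch, assuming bin_version() and its bin_path() helper are importable and wget happens to be installed:

print(bin_version('wget'))  # e.g. 'GNU Wget 1.21.4' (first three columns of the first --version line), or None if missing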
find any installed chrome binaries in the default locations
def find_chrome_binary() -> Optional[str]:
    """find any installed chrome binaries in the default locations"""
    # Precedence: Chromium, Chrome, Beta, Canary, Unstable, Dev
    # make sure data dir finding precedence order always matches binary finding order
    default_executable_paths = (
        # '~/Library/Caches/ms-playwright/chromium-*/chrome-mac/Chromium.app/Contents/MacOS/Chromium',
        'chromium-browser',
        'chromium',
        '/Applications/Chromium.app/Contents/MacOS/Chromium',
        'chrome',
        'google-chrome',
        '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
        'google-chrome-stable',
        'google-chrome-beta',
        'google-chrome-canary',
        '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary',
        'google-chrome-unstable',
        'google-chrome-dev',
    )
    for name in default_executable_paths:
        full_path_exists = shutil.which(name)
        if full_path_exists:
            return name

    return None
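A hedged usage sketch, assuming find_chrome_binary() above is in scope:

import shutil

chrome = find_chrome_binary()
if chrome:
    print('Using browser binary:', shutil.which(chrome) or chrome)
else:
    print('No Chromium/Chrome binary found on PATH or in the default macOS locations')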
find any installed chrome user data directories in the default locations
def find_chrome_data_dir() -> Optional[str]: """find any installed chrome user data directories in the default locations""" # deprecated because this is DANGEROUS, do not re-implement/uncomment this behavior. # Going forward we want to discourage people from using their main chrome profile for archiving. # Session tokens, personal data, and cookies are often returned in server responses, # when they get archived, they are essentially burned as anyone who can view the archive # can use that data to masquerade as the logged-in user that did the archiving. # For this reason users should always create dedicated burner profiles for archiving and not use # their daily driver main accounts. # # Precedence: Chromium, Chrome, Beta, Canary, Unstable, Dev # # make sure data dir finding precedence order always matches binary finding order # default_profile_paths = ( # '~/.config/chromium', # '~/Library/Application Support/Chromium', # '~/AppData/Local/Chromium/User Data', # '~/.config/chrome', # '~/.config/google-chrome', # '~/Library/Application Support/Google/Chrome', # '~/AppData/Local/Google/Chrome/User Data', # '~/.config/google-chrome-stable', # '~/.config/google-chrome-beta', # '~/Library/Application Support/Google/Chrome Canary', # '~/AppData/Local/Google/Chrome SxS/User Data', # '~/.config/google-chrome-unstable', # '~/.config/google-chrome-dev', # ) # for path in default_profile_paths: # full_path = Path(path).resolve() # if full_path.exists(): # return full_path return None
Tell the user they passed stdin to a command that doesn't accept it
def reject_stdin(caller: str, stdin: Optional[IO]=sys.stdin) -> None:
    """Tell the user they passed stdin to a command that doesn't accept it"""
    if not stdin:
        return None

    if IN_DOCKER:
        # when TTY is disabled in docker we cant tell if stdin is being piped in or not
        # if we try to read stdin when its not piped we will hang indefinitely waiting for it
        return None

    if not stdin.isatty():
        # stderr('READING STDIN TO REJECT...')
        stdin_raw_text = stdin.read()
        if stdin_raw_text.strip():
            # stderr('GOT STDIN!', len(stdin_str))
            stderr(f'[!] The "{caller}" command does not accept stdin (ignoring).', color='red')
            stderr(f' Run archivebox "{caller} --help" to see usage and examples.')
            stderr()
            # raise SystemExit(1)
    return None
accept any standard input and return it as a string or None
def accept_stdin(stdin: Optional[IO]=sys.stdin) -> Optional[str]:
    """accept any standard input and return it as a string or None"""
    if not stdin:
        return None

    if not stdin.isatty():
        # stderr('READING STDIN TO ACCEPT...')
        stdin_str = stdin.read()

        if stdin_str:
            # stderr('GOT STDIN...', len(stdin_str))
            return stdin_str

    return None
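A typical call-site sketch, assuming accept_stdin() above is in scope:

import sys

urls_text = accept_stdin(sys.stdin)
if urls_text is None:
    print('stdin is a TTY or empty; fall back to CLI arguments instead')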
show timer in the form of progress bar, with percentage and seconds remaining
def progress_bar(seconds: int, prefix: str='') -> None: """show timer in the form of progress bar, with percentage and seconds remaining""" chunk = 'β–ˆ' if PYTHON_ENCODING == 'UTF-8' else '#' last_width = TERM_WIDTH() chunks = last_width - len(prefix) - 20 # number of progress chunks to show (aka max bar width) try: for s in range(seconds * chunks): max_width = TERM_WIDTH() if max_width < last_width: # when the terminal size is shrunk, we have to write a newline # otherwise the progress bar will keep wrapping incorrectly sys.stdout.write('\r\n') sys.stdout.flush() chunks = max_width - len(prefix) - 20 pct_complete = s / chunks / seconds * 100 log_pct = (log(pct_complete or 1, 10) / 2) * 100 # everyone likes faster progress bars ;) bar_width = round(log_pct/(100/chunks)) last_width = max_width # β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ 0.9% (1/60sec) sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format( prefix, ANSI['green' if pct_complete < 80 else 'lightyellow'], (chunk * bar_width).ljust(chunks), ANSI['reset'], round(pct_complete, 1), round(s/chunks), seconds, )) sys.stdout.flush() time.sleep(1 / chunks) # β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ 100.0% (60/60sec) sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format( prefix, ANSI['red'], chunk * chunks, ANSI['reset'], 100.0, seconds, seconds, )) sys.stdout.flush() # uncomment to have it disappear when it hits 100% instead of staying full red: # time.sleep(0.5) # sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH()), ANSI['reset'])) # sys.stdout.flush() except (KeyboardInterrupt, BrokenPipeError): print()
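The bar width is driven by a log-scaled percentage rather than the raw one, which is why it appears to move quickly at first (the "everyone likes faster progress bars" trick above). A small standalone illustration of that mapping:

from math import log

for pct in (1, 10, 50, 100):
    log_pct = (log(pct or 1, 10) / 2) * 100
    print(f'{pct:>3}% elapsed -> bar drawn at {log_pct:5.1f}%')  # 1% -> 0.0%, 10% -> 50.0%, 50% -> ~84.9%, 100% -> 100.0%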
Log the outcome of a finished archive method, quoting any command arguments that contain whitespace or colons so the user can copy-paste the printed cmd directly to re-run it
def log_archive_method_finished(result: "ArchiveResult"): """quote the argument with whitespace in a command so the user can copy-paste the outputted string directly to run the cmd """ # Prettify CMD string and make it safe to copy-paste by quoting arguments quoted_cmd = ' '.join( '"{}"'.format(arg) if (' ' in arg) or (':' in arg) else arg for arg in result.cmd ) if result.status == 'failed': if result.output.__class__.__name__ == 'TimeoutExpired': duration = (result.end_ts - result.start_ts).seconds hint_header = [ '{lightyellow}Extractor timed out after {}s.{reset}'.format(duration, **ANSI), ] else: hint_header = [ '{lightyellow}Extractor failed:{reset}'.format(**ANSI), ' {reset}{} {red}{}{reset}'.format( result.output.__class__.__name__.replace('ArchiveError', ''), result.output, **ANSI, ), ] # import pudb; pudb.set_trace() # Prettify error output hints string and limit to five lines hints = getattr(result.output, 'hints', None) or () if hints: if isinstance(hints, (list, tuple, type(_ for _ in ()))): hints = [hint.decode() if isinstance(hint, bytes) else str(hint) for hint in hints] else: if isinstance(hints, bytes): hints = hints.decode() hints = hints.split('\n') hints = ( ' {}{}{}'.format(ANSI['lightyellow'], line.strip(), ANSI['reset']) for line in list(hints)[:5] if line.strip() ) docker_hints = () if IN_DOCKER: docker_hints = ( ' docker run -it -v $PWD/data:/data archivebox/archivebox /bin/bash', ) # Collect and prefix output lines with indentation output_lines = [ *hint_header, *hints, '{}Run to see full output:{}'.format(ANSI['lightred'], ANSI['reset']), *docker_hints, *([' cd {};'.format(result.pwd)] if result.pwd else []), ' {}'.format(quoted_cmd), ] print('\n'.join( ' {}'.format(line) for line in output_lines if line )) print()
convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc
def pretty_path(path: Union[Path, str], pwd: Union[Path, str]=OUTPUT_DIR) -> str:
    """convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
    pwd = str(Path(pwd))  # .resolve()
    path = str(path)

    if not path:
        return path

    # replace long absolute paths with ./ relative ones to save on terminal output width
    if path.startswith(pwd) and (pwd != '/'):
        path = path.replace(pwd, '.', 1)

    # quote paths containing spaces
    if ' ' in path:
        path = f'"{path}"'

    # if path is just a plain dot, replace it back with the absolute path for clarity
    if path == '.':
        path = pwd

    return path
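Examples of the substitutions, assuming pretty_path() above is in scope (the paths are hypothetical):

print(pretty_path('/home/user/archive/archive/1234', pwd='/home/user/archive'))  # -> './archive/1234'
print(pretty_path('/etc/hosts', pwd='/home/user/archive'))                       # -> '/etc/hosts' (outside pwd, left alone)
print(pretty_path('/home/user/archive', pwd='/home/user/archive'))               # -> '/home/user/archive' (a bare '.' is expanded back)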
Print the ArchiveBox help message and usage
def help(out_dir: Path=OUTPUT_DIR) -> None: """Print the ArchiveBox help message and usage""" all_subcommands = list_subcommands() COMMANDS_HELP_TEXT = '\n '.join( f'{cmd.ljust(20)} {summary}' for cmd, summary in all_subcommands.items() if cmd in meta_cmds ) + '\n\n ' + '\n '.join( f'{cmd.ljust(20)} {summary}' for cmd, summary in all_subcommands.items() if cmd in main_cmds ) + '\n\n ' + '\n '.join( f'{cmd.ljust(20)} {summary}' for cmd, summary in all_subcommands.items() if cmd in archive_cmds ) + '\n\n ' + '\n '.join( f'{cmd.ljust(20)} {summary}' for cmd, summary in all_subcommands.items() if cmd not in display_first ) if (Path(out_dir) / SQL_INDEX_FILENAME).exists(): print('''{green}ArchiveBox v{}: The self-hosted internet archive.{reset} {lightred}Active data directory:{reset} {} {lightred}Usage:{reset} archivebox [command] [--help] [--version] [...args] {lightred}Commands:{reset} {} {lightred}Example Use:{reset} mkdir my-archive; cd my-archive/ archivebox init archivebox status archivebox add https://example.com/some/page archivebox add --depth=1 ~/Downloads/bookmarks_export.html archivebox list --sort=timestamp --csv=timestamp,url,is_archived archivebox schedule --every=day https://example.com/some/feed.rss archivebox update --resume=15109948213.123 {lightred}Documentation:{reset} https://github.com/ArchiveBox/ArchiveBox/wiki '''.format(VERSION, out_dir, COMMANDS_HELP_TEXT, **ANSI)) else: print('{green}Welcome to ArchiveBox v{}!{reset}'.format(VERSION, **ANSI)) print() if IN_DOCKER: print('When using Docker, you need to mount a volume to use as your data dir:') print(' docker run -v /some/path:/data archivebox ...') print() print('To import an existing archive (from a previous version of ArchiveBox):') print(' 1. cd into your data dir OUTPUT_DIR (usually ArchiveBox/output) and run:') print(' 2. archivebox init') print() print('To start a new archive:') print(' 1. Create an empty directory, then cd into it and run:') print(' 2. archivebox init') print() print('For more information, see the documentation here:') print(' https://github.com/ArchiveBox/ArchiveBox/wiki')
Print the ArchiveBox version and dependency information
def version(quiet: bool=False, out_dir: Path=OUTPUT_DIR) -> None: """Print the ArchiveBox version and dependency information""" print(VERSION) if not quiet: # 0.7.1 # ArchiveBox v0.7.1+editable COMMIT_HASH=951bba5 BUILD_TIME=2023-12-17 16:46:05 1702860365 # IN_DOCKER=False IN_QEMU=False ARCH=arm64 OS=Darwin PLATFORM=macOS-14.2-arm64-arm-64bit PYTHON=Cpython # FS_ATOMIC=True FS_REMOTE=False FS_USER=501:20 FS_PERMS=644 # DEBUG=False IS_TTY=True TZ=UTC SEARCH_BACKEND=ripgrep LDAP=False p = platform.uname() print( 'ArchiveBox v{}'.format(get_version(CONFIG)), *((f'COMMIT_HASH={COMMIT_HASH[:7]}',) if COMMIT_HASH else ()), f'BUILD_TIME={BUILD_TIME}', ) print( f'IN_DOCKER={IN_DOCKER}', f'IN_QEMU={IN_QEMU}', f'ARCH={p.machine}', f'OS={p.system}', f'PLATFORM={platform.platform()}', f'PYTHON={sys.implementation.name.title()}', ) OUTPUT_IS_REMOTE_FS = DATA_LOCATIONS['OUTPUT_DIR']['is_mount'] or DATA_LOCATIONS['ARCHIVE_DIR']['is_mount'] print( f'FS_ATOMIC={ENFORCE_ATOMIC_WRITES}', f'FS_REMOTE={OUTPUT_IS_REMOTE_FS}', f'FS_USER={PUID}:{PGID}', f'FS_PERMS={OUTPUT_PERMISSIONS}', ) print( f'DEBUG={DEBUG}', f'IS_TTY={IS_TTY}', f'TZ={TIMEZONE}', f'SEARCH_BACKEND={SEARCH_BACKEND_ENGINE}', f'LDAP={LDAP}', #f'DB=django.db.backends.sqlite3 (({CONFIG["SQLITE_JOURNAL_MODE"]})', # add this if we have more useful info to show eventually ) print() print('{white}[i] Dependency versions:{reset}'.format(**ANSI)) for name, dependency in DEPENDENCIES.items(): print(printable_dependency_version(name, dependency)) # add a newline between core dependencies and extractor dependencies for easier reading if name == 'ARCHIVEBOX_BINARY': print() print() print('{white}[i] Source-code locations:{reset}'.format(**ANSI)) for name, path in CODE_LOCATIONS.items(): print(printable_folder_status(name, path)) print() if DATA_LOCATIONS['OUTPUT_DIR']['is_valid']: print('{white}[i] Data locations:{reset}'.format(**ANSI)) for name, path in DATA_LOCATIONS.items(): print(printable_folder_status(name, path)) else: print() print('{white}[i] Data locations:{reset} (not in a data directory)'.format(**ANSI)) print() check_dependencies()
Run a given ArchiveBox subcommand with the given list of args
def run(subcommand: str, subcommand_args: Optional[List[str]], stdin: Optional[IO]=None, out_dir: Path=OUTPUT_DIR) -> None:
    """Run a given ArchiveBox subcommand with the given list of args"""
    run_subcommand(
        subcommand=subcommand,
        subcommand_args=subcommand_args,
        stdin=stdin,
        pwd=out_dir,
    )
Initialize a new ArchiveBox collection in the current directory
def init(force: bool=False, quick: bool=False, setup: bool=False, out_dir: Path=OUTPUT_DIR) -> None: """Initialize a new ArchiveBox collection in the current directory""" from core.models import Snapshot out_dir.mkdir(exist_ok=True) is_empty = not len(set(os.listdir(out_dir)) - ALLOWED_IN_OUTPUT_DIR) if (out_dir / JSON_INDEX_FILENAME).exists(): stderr("[!] This folder contains a JSON index. It is deprecated, and will no longer be kept up to date automatically.", color="lightyellow") stderr(" You can run `archivebox list --json --with-headers > static_index.json` to manually generate it.", color="lightyellow") existing_index = (out_dir / SQL_INDEX_FILENAME).exists() if is_empty and not existing_index: print('{green}[+] Initializing a new ArchiveBox v{} collection...{reset}'.format(VERSION, **ANSI)) print('{green}----------------------------------------------------------------------{reset}'.format(**ANSI)) elif existing_index: # TODO: properly detect and print the existing version in current index as well print('{green}[^] Verifying and updating existing ArchiveBox collection to v{}...{reset}'.format(VERSION, **ANSI)) print('{green}----------------------------------------------------------------------{reset}'.format(**ANSI)) else: if force: stderr('[!] This folder appears to already have files in it, but no index.sqlite3 is present.', color='lightyellow') stderr(' Because --force was passed, ArchiveBox will initialize anyway (which may overwrite existing files).') else: stderr( ("{red}[X] This folder appears to already have files in it, but no index.sqlite3 present.{reset}\n\n" " You must run init in a completely empty directory, or an existing data folder.\n\n" " {lightred}Hint:{reset} To import an existing data folder make sure to cd into the folder first, \n" " then run and run 'archivebox init' to pick up where you left off.\n\n" " (Always make sure your data folder is backed up first before updating ArchiveBox)" ).format(out_dir, **ANSI) ) raise SystemExit(2) if existing_index: print('\n{green}[*] Verifying archive folder structure...{reset}'.format(**ANSI)) else: print('\n{green}[+] Building archive folder structure...{reset}'.format(**ANSI)) print(f' + ./{ARCHIVE_DIR.relative_to(OUTPUT_DIR)}, ./{SOURCES_DIR.relative_to(OUTPUT_DIR)}, ./{LOGS_DIR.relative_to(OUTPUT_DIR)}...') Path(SOURCES_DIR).mkdir(exist_ok=True) Path(ARCHIVE_DIR).mkdir(exist_ok=True) Path(LOGS_DIR).mkdir(exist_ok=True) print(f' + ./{CONFIG_FILE.relative_to(OUTPUT_DIR)}...') write_config_file({}, out_dir=out_dir) if (out_dir / SQL_INDEX_FILENAME).exists(): print('\n{green}[*] Verifying main SQL index and running any migrations needed...{reset}'.format(**ANSI)) else: print('\n{green}[+] Building main SQL index and running initial migrations...{reset}'.format(**ANSI)) DATABASE_FILE = out_dir / SQL_INDEX_FILENAME for migration_line in apply_migrations(out_dir): print(f' {migration_line}') assert DATABASE_FILE.exists() print() print(f' √ ./{DATABASE_FILE.relative_to(OUTPUT_DIR)}') # from django.contrib.auth.models import User # if IS_TTY and not User.objects.filter(is_superuser=True).exists(): # print('{green}[+] Creating admin user account...{reset}'.format(**ANSI)) # call_command("createsuperuser", interactive=True) print() print('{green}[*] Checking links from indexes and archive folders (safe to Ctrl+C)...{reset}'.format(**ANSI)) all_links = Snapshot.objects.none() pending_links: Dict[str, Link] = {} if existing_index: all_links = load_main_index(out_dir=out_dir, warn=False) print(' √ Loaded {} links from existing main 
index.'.format(all_links.count())) if quick: print(' > Skipping full snapshot directory check (quick mode)') else: try: # Links in data folders that dont match their timestamp fixed, cant_fix = fix_invalid_folder_locations(out_dir=out_dir) if fixed: print(' {lightyellow}√ Fixed {} data directory locations that didn\'t match their link timestamps.{reset}'.format(len(fixed), **ANSI)) if cant_fix: print(' {lightyellow}! Could not fix {} data directory locations due to conflicts with existing folders.{reset}'.format(len(cant_fix), **ANSI)) # Links in JSON index but not in main index orphaned_json_links = { link.url: link for link in parse_json_main_index(out_dir) if not all_links.filter(url=link.url).exists() } if orphaned_json_links: pending_links.update(orphaned_json_links) print(' {lightyellow}√ Added {} orphaned links from existing JSON index...{reset}'.format(len(orphaned_json_links), **ANSI)) # Links in data dir indexes but not in main index orphaned_data_dir_links = { link.url: link for link in parse_json_links_details(out_dir) if not all_links.filter(url=link.url).exists() } if orphaned_data_dir_links: pending_links.update(orphaned_data_dir_links) print(' {lightyellow}√ Added {} orphaned links from existing archive directories.{reset}'.format(len(orphaned_data_dir_links), **ANSI)) # Links in invalid/duplicate data dirs invalid_folders = { folder: link for folder, link in get_invalid_folders(all_links, out_dir=out_dir).items() } if invalid_folders: print(' {lightyellow}! Skipped adding {} invalid link data directories.{reset}'.format(len(invalid_folders), **ANSI)) print(' X ' + '\n X '.join(f'./{Path(folder).relative_to(OUTPUT_DIR)} {link}' for folder, link in invalid_folders.items())) print() print(' {lightred}Hint:{reset} For more information about the link data directories that were skipped, run:'.format(**ANSI)) print(' archivebox status') print(' archivebox list --status=invalid') except (KeyboardInterrupt, SystemExit): stderr() stderr('[x] Stopped checking archive directories due to Ctrl-C/SIGTERM', color='red') stderr(' Your archive data is safe, but you should re-run `archivebox init` to finish the process later.') stderr() stderr(' {lightred}Hint:{reset} In the future you can run a quick init without checking dirs like so:'.format(**ANSI)) stderr(' archivebox init --quick') raise SystemExit(1) write_main_index(list(pending_links.values()), out_dir=out_dir) print('\n{green}----------------------------------------------------------------------{reset}'.format(**ANSI)) from django.contrib.auth.models import User if (ADMIN_USERNAME and ADMIN_PASSWORD) and not User.objects.filter(username=ADMIN_USERNAME).exists(): print('{green}[+] Found ADMIN_USERNAME and ADMIN_PASSWORD configuration options, creating new admin user.{reset}'.format(**ANSI)) User.objects.create_superuser(username=ADMIN_USERNAME, password=ADMIN_PASSWORD) if existing_index: print('{green}[√] Done. Verified and updated the existing ArchiveBox collection.{reset}'.format(**ANSI)) else: print('{green}[√] Done. 
A new ArchiveBox collection was initialized ({} links).{reset}'.format(len(all_links) + len(pending_links), **ANSI)) json_index = out_dir / JSON_INDEX_FILENAME html_index = out_dir / HTML_INDEX_FILENAME index_name = f"{date.today()}_index_old" if json_index.exists(): json_index.rename(f"{index_name}.json") if html_index.exists(): html_index.rename(f"{index_name}.html") if setup: run_subcommand('setup', pwd=out_dir) if Snapshot.objects.count() < 25: # hide the hints for experienced users print() print(' {lightred}Hint:{reset} To view your archive index, run:'.format(**ANSI)) print(' archivebox server # then visit http://127.0.0.1:8000') print() print(' To add new links, you can run:') print(" archivebox add < ~/some/path/to/list_of_links.txt") print() print(' For more usage and examples, run:') print(' archivebox help')
Print out some info and statistics about the archive collection
def status(out_dir: Path=OUTPUT_DIR) -> None: """Print out some info and statistics about the archive collection""" check_data_folder(out_dir=out_dir) from core.models import Snapshot from django.contrib.auth import get_user_model User = get_user_model() print('{green}[*] Scanning archive main index...{reset}'.format(**ANSI)) print(ANSI['lightyellow'], f' {out_dir}/*', ANSI['reset']) num_bytes, num_dirs, num_files = get_dir_size(out_dir, recursive=False, pattern='index.') size = printable_filesize(num_bytes) print(f' Index size: {size} across {num_files} files') print() links = load_main_index(out_dir=out_dir) num_sql_links = links.count() num_link_details = sum(1 for link in parse_json_links_details(out_dir=out_dir)) print(f' > SQL Main Index: {num_sql_links} links'.ljust(36), f'(found in {SQL_INDEX_FILENAME})') print(f' > JSON Link Details: {num_link_details} links'.ljust(36), f'(found in {ARCHIVE_DIR_NAME}/*/index.json)') print() print('{green}[*] Scanning archive data directories...{reset}'.format(**ANSI)) print(ANSI['lightyellow'], f' {ARCHIVE_DIR}/*', ANSI['reset']) num_bytes, num_dirs, num_files = get_dir_size(ARCHIVE_DIR) size = printable_filesize(num_bytes) print(f' Size: {size} across {num_files} files in {num_dirs} directories') print(ANSI['black']) num_indexed = len(get_indexed_folders(links, out_dir=out_dir)) num_archived = len(get_archived_folders(links, out_dir=out_dir)) num_unarchived = len(get_unarchived_folders(links, out_dir=out_dir)) print(f' > indexed: {num_indexed}'.ljust(36), f'({get_indexed_folders.__doc__})') print(f' > archived: {num_archived}'.ljust(36), f'({get_archived_folders.__doc__})') print(f' > unarchived: {num_unarchived}'.ljust(36), f'({get_unarchived_folders.__doc__})') num_present = len(get_present_folders(links, out_dir=out_dir)) num_valid = len(get_valid_folders(links, out_dir=out_dir)) print() print(f' > present: {num_present}'.ljust(36), f'({get_present_folders.__doc__})') print(f' > valid: {num_valid}'.ljust(36), f'({get_valid_folders.__doc__})') duplicate = get_duplicate_folders(links, out_dir=out_dir) orphaned = get_orphaned_folders(links, out_dir=out_dir) corrupted = get_corrupted_folders(links, out_dir=out_dir) unrecognized = get_unrecognized_folders(links, out_dir=out_dir) num_invalid = len({**duplicate, **orphaned, **corrupted, **unrecognized}) print(f' > invalid: {num_invalid}'.ljust(36), f'({get_invalid_folders.__doc__})') print(f' > duplicate: {len(duplicate)}'.ljust(36), f'({get_duplicate_folders.__doc__})') print(f' > orphaned: {len(orphaned)}'.ljust(36), f'({get_orphaned_folders.__doc__})') print(f' > corrupted: {len(corrupted)}'.ljust(36), f'({get_corrupted_folders.__doc__})') print(f' > unrecognized: {len(unrecognized)}'.ljust(36), f'({get_unrecognized_folders.__doc__})') print(ANSI['reset']) if num_indexed: print(' {lightred}Hint:{reset} You can list link data directories by status like so:'.format(**ANSI)) print(' archivebox list --status=<status> (e.g. 
indexed, corrupted, archived, etc.)') if orphaned: print(' {lightred}Hint:{reset} To automatically import orphaned data directories into the main index, run:'.format(**ANSI)) print(' archivebox init') if num_invalid: print(' {lightred}Hint:{reset} You may need to manually remove or fix some invalid data directories, afterwards make sure to run:'.format(**ANSI)) print(' archivebox init') print() print('{green}[*] Scanning recent archive changes and user logins:{reset}'.format(**ANSI)) print(ANSI['lightyellow'], f' {LOGS_DIR}/*', ANSI['reset']) users = get_admins().values_list('username', flat=True) print(f' UI users {len(users)}: {", ".join(users)}') last_login = User.objects.order_by('last_login').last() if last_login: print(f' Last UI login: {last_login.username} @ {str(last_login.last_login)[:16]}') last_updated = Snapshot.objects.order_by('updated').last() if last_updated: print(f' Last changes: {str(last_updated.updated)[:16]}') if not users: print() print(' {lightred}Hint:{reset} You can create an admin user by running:'.format(**ANSI)) print(' archivebox manage createsuperuser') print() for snapshot in links.order_by('-updated')[:10]: if not snapshot.updated: continue print( ANSI['black'], ( f' > {str(snapshot.updated)[:16]} ' f'[{snapshot.num_outputs} {("X", "√")[snapshot.is_archived]} {printable_filesize(snapshot.archive_size)}] ' f'"{snapshot.title}": {snapshot.url}' )[:TERM_WIDTH()], ANSI['reset'], ) print(ANSI['black'], ' ...', ANSI['reset'])
Create a single URL archive folder with an index.json and index.html, and all the archive method outputs. You can run this to archive single pages without needing to create a whole collection with archivebox init.
def oneshot(url: str, extractors: str="", out_dir: Path=OUTPUT_DIR):
    """
    Create a single URL archive folder with an index.json and index.html, and all the archive method outputs.
    You can run this to archive single pages without needing to create a whole collection with archivebox init.
    """
    oneshot_link, _ = parse_links_memory([url])
    if len(oneshot_link) > 1:
        stderr(
            '[X] You should pass a single url to the oneshot command',
            color='red'
        )
        raise SystemExit(2)

    methods = extractors.split(",") if extractors else ignore_methods(['title'])
    archive_link(oneshot_link[0], out_dir=out_dir, methods=methods)
    return oneshot_link
Add a new URL or list of URLs to your archive
def add(urls: Union[str, List[str]], tag: str='', depth: int=0, update: bool=not ONLY_NEW, update_all: bool=False, index_only: bool=False, overwrite: bool=False, # duplicate: bool=False, # TODO: reuse the logic from admin.py resnapshot to allow adding multiple snapshots by appending timestamp automatically init: bool=False, extractors: str="", parser: str="auto", out_dir: Path=OUTPUT_DIR) -> List[Link]: """Add a new URL or list of URLs to your archive""" from core.models import Snapshot, Tag assert depth in (0, 1), 'Depth must be 0 or 1 (depth >1 is not supported yet)' extractors = extractors.split(",") if extractors else [] if init: run_subcommand('init', stdin=None, pwd=out_dir) # Load list of links from the existing index check_data_folder(out_dir=out_dir) check_dependencies() new_links: List[Link] = [] all_links = load_main_index(out_dir=out_dir) log_importing_started(urls=urls, depth=depth, index_only=index_only) if isinstance(urls, str): # save verbatim stdin to sources write_ahead_log = save_text_as_source(urls, filename='{ts}-import.txt', out_dir=out_dir) elif isinstance(urls, list): # save verbatim args to sources write_ahead_log = save_text_as_source('\n'.join(urls), filename='{ts}-import.txt', out_dir=out_dir) new_links += parse_links_from_source(write_ahead_log, root_url=None, parser=parser) # If we're going one level deeper, download each link and look for more links new_links_depth = [] if new_links and depth == 1: log_crawl_started(new_links) for new_link in new_links: try: downloaded_file = save_file_as_source(new_link.url, filename=f'{new_link.timestamp}-crawl-{new_link.domain}.txt', out_dir=out_dir) new_links_depth += parse_links_from_source(downloaded_file, root_url=new_link.url) except Exception as err: stderr('[!] Failed to get contents of URL {new_link.url}', err, color='red') imported_links = list({link.url: link for link in (new_links + new_links_depth)}.values()) new_links = dedupe_links(all_links, imported_links) write_main_index(links=new_links, out_dir=out_dir) all_links = load_main_index(out_dir=out_dir) tags = [ Tag.objects.get_or_create(name=name.strip())[0] for name in tag.split(',') if name.strip() ] if tags: for link in imported_links: snapshot = Snapshot.objects.get(url=link.url) snapshot.tags.add(*tags) snapshot.tags_str(nocache=True) snapshot.save() # print(f' √ Tagged {len(imported_links)} Snapshots with {len(tags)} tags {tags_str}') if index_only: # mock archive all the links using the fake index_only extractor method in order to update their state if overwrite: archive_links(imported_links, overwrite=overwrite, methods=['index_only'], out_dir=out_dir) else: archive_links(new_links, overwrite=False, methods=['index_only'], out_dir=out_dir) else: # fully run the archive extractor methods for each link archive_kwargs = { "out_dir": out_dir, } if extractors: archive_kwargs["methods"] = extractors stderr() ts = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S') if update: stderr(f'[*] [{ts}] Archiving + updating {len(imported_links)}/{len(all_links)}', len(imported_links), 'URLs from added set...', color='green') archive_links(imported_links, overwrite=overwrite, **archive_kwargs) elif update_all: stderr(f'[*] [{ts}] Archiving + updating {len(all_links)}/{len(all_links)}', len(all_links), 'URLs from entire library...', color='green') archive_links(all_links, overwrite=overwrite, **archive_kwargs) elif overwrite: stderr(f'[*] [{ts}] Archiving + overwriting {len(imported_links)}/{len(all_links)}', len(imported_links), 'URLs from added set...', 
color='green') archive_links(imported_links, overwrite=True, **archive_kwargs) elif new_links: stderr(f'[*] [{ts}] Archiving {len(new_links)}/{len(all_links)} URLs from added set...', color='green') archive_links(new_links, overwrite=False, **archive_kwargs) if CAN_UPGRADE: hint(f"There's a new version of ArchiveBox available! Your current version is {VERSION}. You can upgrade to {VERSIONS_AVAILABLE['recommended_version']['tag_name']} ({VERSIONS_AVAILABLE['recommended_version']['html_url']}). For more on how to upgrade: https://github.com/ArchiveBox/ArchiveBox/wiki/Upgrading-or-Merging-Archives\n") return new_links
Remove the specified URLs from the archive
def remove(filter_str: Optional[str]=None, filter_patterns: Optional[List[str]]=None, filter_type: str='exact', snapshots: Optional[QuerySet]=None, after: Optional[float]=None, before: Optional[float]=None, yes: bool=False, delete: bool=False, out_dir: Path=OUTPUT_DIR) -> List[Link]: """Remove the specified URLs from the archive""" check_data_folder(out_dir=out_dir) if snapshots is None: if filter_str and filter_patterns: stderr( '[X] You should pass either a pattern as an argument, ' 'or pass a list of patterns via stdin, but not both.\n', color='red', ) raise SystemExit(2) elif not (filter_str or filter_patterns): stderr( '[X] You should pass either a pattern as an argument, ' 'or pass a list of patterns via stdin.', color='red', ) stderr() hint(('To remove all urls you can run:', 'archivebox remove --filter-type=regex ".*"')) stderr() raise SystemExit(2) elif filter_str: filter_patterns = [ptn.strip() for ptn in filter_str.split('\n')] list_kwargs = { "filter_patterns": filter_patterns, "filter_type": filter_type, "after": after, "before": before, } if snapshots: list_kwargs["snapshots"] = snapshots log_list_started(filter_patterns, filter_type) timer = TimedProgress(360, prefix=' ') try: snapshots = list_links(**list_kwargs) finally: timer.end() if not snapshots.exists(): log_removal_finished(0, 0) raise SystemExit(1) log_links = [link.as_link() for link in snapshots] log_list_finished(log_links) log_removal_started(log_links, yes=yes, delete=delete) timer = TimedProgress(360, prefix=' ') try: for snapshot in snapshots: if delete: shutil.rmtree(snapshot.as_link().link_dir, ignore_errors=True) finally: timer.end() to_remove = snapshots.count() flush_search_index(snapshots=snapshots) remove_from_sql_main_index(snapshots=snapshots, out_dir=out_dir) all_snapshots = load_main_index(out_dir=out_dir) log_removal_finished(all_snapshots.count(), to_remove) return all_snapshots
Import any new links from subscriptions and retry any previously failed/skipped links
def update(resume: Optional[float]=None, only_new: bool=ONLY_NEW, index_only: bool=False, overwrite: bool=False, filter_patterns_str: Optional[str]=None, filter_patterns: Optional[List[str]]=None, filter_type: Optional[str]=None, status: Optional[str]=None, after: Optional[str]=None, before: Optional[str]=None, extractors: str="", out_dir: Path=OUTPUT_DIR) -> List[Link]: """Import any new links from subscriptions and retry any previously failed/skipped links""" from core.models import ArchiveResult check_data_folder(out_dir=out_dir) check_dependencies() new_links: List[Link] = [] # TODO: Remove input argument: only_new extractors = extractors.split(",") if extractors else [] # Step 1: Filter for selected_links print('[*] Finding matching Snapshots to update...') print(f' - Filtering by {" ".join(filter_patterns)} ({filter_type}) {before=} {after=} {status=}...') matching_snapshots = list_links( filter_patterns=filter_patterns, filter_type=filter_type, before=before, after=after, ) print(f' - Checking {matching_snapshots.count()} snapshot folders for existing data with {status=}...') matching_folders = list_folders( links=matching_snapshots, status=status, out_dir=out_dir, ) all_links = (link for link in matching_folders.values() if link) print(' - Sorting by most unfinished -> least unfinished + date archived...') all_links = sorted(all_links, key=lambda link: (ArchiveResult.objects.filter(snapshot__url=link.url).count(), link.timestamp)) if index_only: for link in all_links: write_link_details(link, out_dir=out_dir, skip_sql_index=True) index_links(all_links, out_dir=out_dir) return all_links # Step 2: Run the archive methods for each link to_archive = new_links if only_new else all_links if resume: to_archive = [ link for link in to_archive if link.timestamp >= str(resume) ] if not to_archive: stderr('') stderr(f'[√] Nothing found to resume after {resume}', color='green') return all_links archive_kwargs = { "out_dir": out_dir, } if extractors: archive_kwargs["methods"] = extractors archive_links(to_archive, overwrite=overwrite, **archive_kwargs) # Step 4: Re-write links index with updated titles, icons, and resources all_links = load_main_index(out_dir=out_dir) return all_links
List, filter, and export information about archive entries
def list_all(filter_patterns_str: Optional[str]=None, filter_patterns: Optional[List[str]]=None, filter_type: str='exact', status: Optional[str]=None, after: Optional[float]=None, before: Optional[float]=None, sort: Optional[str]=None, csv: Optional[str]=None, json: bool=False, html: bool=False, with_headers: bool=False, out_dir: Path=OUTPUT_DIR) -> Iterable[Link]: """List, filter, and export information about archive entries""" check_data_folder(out_dir=out_dir) if filter_patterns and filter_patterns_str: stderr( '[X] You should either pass filter patterns as an arguments ' 'or via stdin, but not both.\n', color='red', ) raise SystemExit(2) elif filter_patterns_str: filter_patterns = filter_patterns_str.split('\n') snapshots = list_links( filter_patterns=filter_patterns, filter_type=filter_type, before=before, after=after, ) if sort: snapshots = snapshots.order_by(sort) folders = list_folders( links=snapshots, status=status, out_dir=out_dir, ) if json: output = generate_json_index_from_links(folders.values(), with_headers) elif html: output = generate_index_from_links(folders.values(), with_headers) elif csv: output = links_to_csv(folders.values(), cols=csv.split(','), header=with_headers) else: output = printable_folders(folders, with_headers=with_headers) print(output) return folders
Automatically install all ArchiveBox dependencies and extras
def setup(out_dir: Path=OUTPUT_DIR) -> None: """Automatically install all ArchiveBox dependencies and extras""" if not (out_dir / ARCHIVE_DIR_NAME).exists(): run_subcommand('init', stdin=None, pwd=out_dir) setup_django(out_dir=out_dir, check_db=True) from core.models import User if not User.objects.filter(is_superuser=True).exists(): stderr('\n[+] Creating new admin user for the Web UI...', color='green') run_subcommand('manage', subcommand_args=['createsuperuser'], pwd=out_dir) stderr('\n[+] Installing enabled ArchiveBox dependencies automatically...', color='green') stderr('\n Installing YOUTUBEDL_BINARY automatically using pip...') if YOUTUBEDL_VERSION: print(f'{YOUTUBEDL_VERSION} is already installed', YOUTUBEDL_BINARY) else: try: run_shell([ PYTHON_BINARY, '-m', 'pip', 'install', '--upgrade', '--no-cache-dir', '--no-warn-script-location', 'youtube_dl', ], capture_output=False, cwd=out_dir) pkg_path = run_shell([ PYTHON_BINARY, '-m', 'pip', 'show', 'youtube_dl', ], capture_output=True, text=True, cwd=out_dir).stdout.decode().split('Location: ')[-1].split('\n', 1)[0] NEW_YOUTUBEDL_BINARY = Path(pkg_path) / 'youtube_dl' / '__main__.py' os.chmod(NEW_YOUTUBEDL_BINARY, 0o777) assert NEW_YOUTUBEDL_BINARY.exists(), f'youtube_dl must exist inside {pkg_path}' config(f'YOUTUBEDL_BINARY={NEW_YOUTUBEDL_BINARY}', set=True, out_dir=out_dir) except BaseException as e: # lgtm [py/catch-base-exception] stderr(f'[X] Failed to install python packages: {e}', color='red') raise SystemExit(1) if platform.machine() == 'armv7l': stderr('\n Skip the automatic installation of CHROME_BINARY because playwright is not available on armv7.') else: stderr('\n Installing CHROME_BINARY automatically using playwright...') if CHROME_VERSION: print(f'{CHROME_VERSION} is already installed', CHROME_BINARY) else: try: run_shell([ PYTHON_BINARY, '-m', 'pip', 'install', '--upgrade', '--no-cache-dir', '--no-warn-script-location', 'playwright', ], capture_output=False, cwd=out_dir) run_shell([PYTHON_BINARY, '-m', 'playwright', 'install', 'chromium'], capture_output=False, cwd=out_dir) proc = run_shell([PYTHON_BINARY, '-c', 'from playwright.sync_api import sync_playwright; print(sync_playwright().start().chromium.executable_path)'], capture_output=True, text=True, cwd=out_dir) NEW_CHROME_BINARY = proc.stdout.decode().strip() if isinstance(proc.stdout, bytes) else proc.stdout.strip() assert NEW_CHROME_BINARY and len(NEW_CHROME_BINARY), 'CHROME_BINARY must contain a path' config(f'CHROME_BINARY={NEW_CHROME_BINARY}', set=True, out_dir=out_dir) except BaseException as e: # lgtm [py/catch-base-exception] stderr(f'[X] Failed to install chromium using playwright: {e.__class__.__name__} {e}', color='red') raise SystemExit(1) stderr('\n Installing SINGLEFILE_BINARY, READABILITY_BINARY, MERCURY_BINARY automatically using npm...') if not NODE_VERSION: stderr('[X] You must first install node & npm using your system package manager', color='red') hint([ 'https://github.com/nodesource/distributions#table-of-contents', 'or to disable all node-based modules run: archivebox config --set USE_NODE=False', ]) raise SystemExit(1) if all((SINGLEFILE_VERSION, READABILITY_VERSION, MERCURY_VERSION)): print('SINGLEFILE_BINARY, READABILITY_BINARY, and MERCURURY_BINARY are already installed') else: try: # clear out old npm package locations paths = ( out_dir / 'package.json', out_dir / 'package_lock.json', out_dir / 'node_modules', ) for path in paths: if path.is_dir(): shutil.rmtree(path, ignore_errors=True) elif path.is_file(): os.remove(path) 
shutil.copyfile(PACKAGE_DIR / 'package.json', out_dir / 'package.json') # copy the js requirements list from the source install into the data dir # lets blindly assume that calling out to npm via shell works reliably cross-platform 🀑 (until proven otherwise via support tickets) run_shell([ 'npm', 'install', '--prefix', str(out_dir), # force it to put the node_modules dir in this folder '--force', # overwrite any existing node_modules '--no-save', # don't bother saving updating the package.json or package-lock.json file '--no-audit', # don't bother checking for newer versions with security vuln fixes '--no-fund', # hide "please fund our project" messages '--loglevel', 'error', # only show erros (hide warn/info/debug) during installation # these args are written in blood, change with caution ], capture_output=False, cwd=out_dir) os.remove(out_dir / 'package.json') except BaseException as e: # lgtm [py/catch-base-exception] stderr(f'[X] Failed to install npm packages: {e}', color='red') hint(f'Try deleting {out_dir}/node_modules and running it again') raise SystemExit(1) stderr('\n[√] Set up ArchiveBox and its dependencies successfully.', color='green') run_shell([PYTHON_BINARY, ARCHIVEBOX_BINARY, '--version'], capture_output=False, cwd=out_dir)
Get and set your ArchiveBox project configuration values
def config(config_options_str: Optional[str]=None, config_options: Optional[List[str]]=None, get: bool=False, set: bool=False, reset: bool=False, out_dir: Path=OUTPUT_DIR) -> None: """Get and set your ArchiveBox project configuration values""" check_data_folder(out_dir=out_dir) if config_options and config_options_str: stderr( '[X] You should either pass config values as an arguments ' 'or via stdin, but not both.\n', color='red', ) raise SystemExit(2) elif config_options_str: config_options = config_options_str.split('\n') config_options = config_options or [] no_args = not (get or set or reset or config_options) matching_config: ConfigDict = {} if get or no_args: if config_options: config_options = [get_real_name(key) for key in config_options] matching_config = {key: CONFIG[key] for key in config_options if key in CONFIG} failed_config = [key for key in config_options if key not in CONFIG] if failed_config: stderr() stderr('[X] These options failed to get', color='red') stderr(' {}'.format('\n '.join(config_options))) raise SystemExit(1) else: matching_config = CONFIG print(printable_config(matching_config)) raise SystemExit(not matching_config) elif set: new_config = {} failed_options = [] for line in config_options: if line.startswith('#') or not line.strip(): continue if '=' not in line: stderr('[X] Config KEY=VALUE must have an = sign in it', color='red') stderr(f' {line}') raise SystemExit(2) raw_key, val = line.split('=', 1) raw_key = raw_key.upper().strip() key = get_real_name(raw_key) if key != raw_key: stderr(f'[i] Note: The config option {raw_key} has been renamed to {key}, please use the new name going forwards.', color='lightyellow') if key in CONFIG: new_config[key] = val.strip() else: failed_options.append(line) if new_config: before = CONFIG matching_config = write_config_file(new_config, out_dir=OUTPUT_DIR) after = load_all_config() print(printable_config(matching_config)) side_effect_changes: ConfigDict = {} for key, val in after.items(): if key in USER_CONFIG and (before[key] != after[key]) and (key not in matching_config): side_effect_changes[key] = after[key] if side_effect_changes: stderr() stderr('[i] Note: This change also affected these other options that depended on it:', color='lightyellow') print(' {}'.format(printable_config(side_effect_changes, prefix=' '))) if failed_options: stderr() stderr('[X] These options failed to set (check for typos):', color='red') stderr(' {}'.format('\n '.join(failed_options))) raise SystemExit(1) elif reset: stderr('[X] This command is not implemented yet.', color='red') stderr(' Please manually remove the relevant lines from your config file:') stderr(f' {CONFIG_FILE}') raise SystemExit(2) else: stderr('[X] You must pass either --get or --set, or no arguments to get the whole config.', color='red') stderr(' archivebox config') stderr(' archivebox config --get SOME_KEY') stderr(' archivebox config --set SOME_KEY=SOME_VALUE') raise SystemExit(2)
Set ArchiveBox to regularly import URLs at specific times using cron
def schedule(add: bool=False, show: bool=False, clear: bool=False, foreground: bool=False, run_all: bool=False, quiet: bool=False, every: Optional[str]=None, tag: str='', depth: int=0, overwrite: bool=False, update: bool=not ONLY_NEW, import_path: Optional[str]=None, out_dir: Path=OUTPUT_DIR): """Set ArchiveBox to regularly import URLs at specific times using cron""" check_data_folder(out_dir=out_dir) Path(LOGS_DIR).mkdir(exist_ok=True) cron = CronTab(user=True) cron = dedupe_cron_jobs(cron) if clear: print(cron.remove_all(comment=CRON_COMMENT)) cron.write() raise SystemExit(0) existing_jobs = list(cron.find_comment(CRON_COMMENT)) if every or add: every = every or 'day' quoted = lambda s: f'"{s}"' if (s and ' ' in str(s)) else str(s) cmd = [ 'cd', quoted(out_dir), '&&', quoted(ARCHIVEBOX_BINARY), *([ 'add', *(['--overwrite'] if overwrite else []), *(['--update'] if update else []), *([f'--tag={tag}'] if tag else []), f'--depth={depth}', f'"{import_path}"', ] if import_path else ['update']), '>>', quoted(Path(LOGS_DIR) / 'schedule.log'), '2>&1', ] new_job = cron.new(command=' '.join(cmd), comment=CRON_COMMENT) if every in ('minute', 'hour', 'day', 'month', 'year'): set_every = getattr(new_job.every(), every) set_every() elif CronSlices.is_valid(every): new_job.setall(every) else: stderr('{red}[X] Got invalid timeperiod for cron task.{reset}'.format(**ANSI)) stderr(' It must be one of minute/hour/day/month') stderr(' or a quoted cron-format schedule like:') stderr(' archivebox init --every=day --depth=1 https://example.com/some/rss/feed.xml') stderr(' archivebox init --every="0/5 * * * *" --depth=1 https://example.com/some/rss/feed.xml') raise SystemExit(1) cron = dedupe_cron_jobs(cron) cron.write() total_runs = sum(j.frequency_per_year() for j in cron) existing_jobs = list(cron.find_comment(CRON_COMMENT)) print() print('{green}[√] Scheduled new ArchiveBox cron job for user: {} ({} jobs are active).{reset}'.format(USER, len(existing_jobs), **ANSI)) print('\n'.join(f' > {cmd}' if str(cmd) == str(new_job) else f' {cmd}' for cmd in existing_jobs)) if total_runs > 60 and not quiet: stderr() stderr('{lightyellow}[!] With the current cron config, ArchiveBox is estimated to run >{} times per year.{reset}'.format(total_runs, **ANSI)) stderr(' Congrats on being an enthusiastic internet archiver! 
👌') stderr() stderr(' Make sure you have enough storage space available to hold all the data.') stderr(' Using a compressed/deduped filesystem like ZFS is recommended if you plan on archiving a lot.') stderr('') elif show: if existing_jobs: print('\n'.join(str(cmd) for cmd in existing_jobs)) else: stderr('{red}[X] There are no ArchiveBox cron jobs scheduled for your user ({}).{reset}'.format(USER, **ANSI)) stderr(' To schedule a new job, run:') stderr(' archivebox schedule --every=[timeperiod] --depth=1 https://example.com/some/rss/feed.xml') raise SystemExit(0) cron = CronTab(user=True) cron = dedupe_cron_jobs(cron) existing_jobs = list(cron.find_comment(CRON_COMMENT)) if foreground or run_all: if not existing_jobs: stderr('{red}[X] You must schedule some jobs first before running in foreground mode.{reset}'.format(**ANSI)) stderr(' archivebox schedule --every=hour --depth=1 https://example.com/some/rss/feed.xml') raise SystemExit(1) print('{green}[*] Running {} ArchiveBox jobs in foreground task scheduler...{reset}'.format(len(existing_jobs), **ANSI)) if run_all: try: for job in existing_jobs: sys.stdout.write(f' > {job.command.split("/archivebox ")[0].split(" && ")[0]}\n') sys.stdout.write(f' > {job.command.split("/archivebox ")[-1].split(" >> ")[0]}') sys.stdout.flush() job.run() sys.stdout.write(f'\r √ {job.command.split("/archivebox ")[-1]}\n') except KeyboardInterrupt: print('\n{green}[√] Stopped.{reset}'.format(**ANSI)) raise SystemExit(1) if foreground: try: for job in existing_jobs: print(f' > {job.command.split("/archivebox ")[-1].split(" >> ")[0]}') for result in cron.run_scheduler(): print(result) except KeyboardInterrupt: print('\n{green}[√] Stopped.{reset}'.format(**ANSI)) raise SystemExit(1) if CAN_UPGRADE: hint(f"There's a new version of ArchiveBox available! Your current version is {VERSION}. You can upgrade to {VERSIONS_AVAILABLE['recommended_version']['tag_name']} ({VERSIONS_AVAILABLE['recommended_version']['html_url']}). For more on how to upgrade: https://github.com/ArchiveBox/ArchiveBox/wiki/Upgrading-or-Merging-Archives\n")
Run the ArchiveBox HTTP server
def server(runserver_args: Optional[List[str]]=None, reload: bool=False, debug: bool=False, init: bool=False, quick_init: bool=False, createsuperuser: bool=False, out_dir: Path=OUTPUT_DIR) -> None: """Run the ArchiveBox HTTP server""" runserver_args = runserver_args or [] if init: run_subcommand('init', stdin=None, pwd=out_dir) print() elif quick_init: run_subcommand('init', subcommand_args=['--quick'], stdin=None, pwd=out_dir) print() if createsuperuser: run_subcommand('manage', subcommand_args=['createsuperuser'], pwd=out_dir) print() # setup config for django runserver from . import config config.SHOW_PROGRESS = False config.DEBUG = config.DEBUG or debug check_data_folder(out_dir=out_dir) from django.core.management import call_command from django.contrib.auth.models import User print('{green}[+] Starting ArchiveBox webserver...{reset}'.format(**ANSI)) print(' > Logging errors to ./logs/errors.log') if not User.objects.filter(is_superuser=True).exists(): print('{lightyellow}[!] No admin users exist yet, you will not be able to edit links in the UI.{reset}'.format(**ANSI)) print() print(' To create an admin user, run:') print(' archivebox manage createsuperuser') print() # fallback to serving staticfiles insecurely with django when DEBUG=False if not config.DEBUG: runserver_args.append('--insecure') # TODO: serve statics w/ nginx instead # toggle autoreloading when archivebox code changes (it's on by default) if not reload: runserver_args.append('--noreload') config.SHOW_PROGRESS = False config.DEBUG = config.DEBUG or debug call_command("runserver", *runserver_args)
Run an ArchiveBox Django management command
def manage(args: Optional[List[str]]=None, out_dir: Path=OUTPUT_DIR) -> None: """Run an ArchiveBox Django management command""" check_data_folder(out_dir=out_dir) from django.core.management import execute_from_command_line if (args and "createsuperuser" in args) and (IN_DOCKER and not IS_TTY): stderr('[!] Warning: you need to pass -it to use interactive commands in docker', color='lightyellow') stderr(' docker run -it archivebox manage {}'.format(' '.join(args or ['...'])), color='lightyellow') stderr() execute_from_command_line([f'{ARCHIVEBOX_BINARY} manage', *(args or ['help'])])
Enter an interactive ArchiveBox Django shell
def shell(out_dir: Path=OUTPUT_DIR) -> None: """Enter an interactive ArchiveBox Django shell""" check_data_folder(out_dir=out_dir) from django.core.management import call_command call_command("shell_plus")
Patched version of subprocess.run that kills forked child subprocesses and fixes blocking IO that made timeout= ineffective. Mostly copied from https://github.com/python/cpython/blob/master/Lib/subprocess.py
def run(cmd, *args, input=None, capture_output=True, timeout=None, check=False, text=False, start_new_session=True, **kwargs): """Patched version of subprocess.run that kills forked child subprocesses and fixes blocking IO that made timeout= ineffective. Mostly copied from https://github.com/python/cpython/blob/master/Lib/subprocess.py """ if input is not None: if kwargs.get('stdin') is not None: raise ValueError('stdin and input arguments may not both be used.') kwargs['stdin'] = PIPE if capture_output: if ('stdout' in kwargs) or ('stderr' in kwargs): raise ValueError('stdout and stderr arguments may not be used with capture_output.') kwargs['stdout'] = PIPE kwargs['stderr'] = PIPE pgid = None try: if isinstance(cmd, (list, tuple)) and cmd[0].endswith('.py'): cmd = (PYTHON_BINARY, *cmd) with Popen(cmd, *args, start_new_session=start_new_session, **kwargs) as process: pgid = os.getpgid(process.pid) try: stdout, stderr = process.communicate(input, timeout=timeout) except TimeoutExpired as exc: process.kill() if _mswindows: # Windows accumulates the output in a single blocking # read() call run on child threads, with the timeout # being done in a join() on those threads. communicate() # _after_ kill() is required to collect that and add it # to the exception. exc.stdout, exc.stderr = process.communicate() else: # POSIX _communicate already populated the output so # far into the TimeoutExpired exception. process.wait() raise except: # Including KeyboardInterrupt, communicate handled that. process.kill() # We don't call process.wait() as .__exit__ does that for us. raise retcode = process.poll() if check and retcode: raise CalledProcessError(retcode, process.args, output=stdout, stderr=stderr) finally: # force kill any straggler subprocesses that were forked from the main proc try: os.killpg(pgid, signal.SIGINT) except Exception: pass return CompletedProcess(process.args, retcode, stdout, stderr)
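# Usage sketch (illustrative, not part of the original source): the patched run() above is called
# just like subprocess.run; it captures output by default, and on POSIX it SIGINTs the child's
# whole process group afterwards so anything the command forked doesn't linger.
result = run(['echo', 'hello world'], timeout=10)
print(result.returncode, result.stdout.decode().strip())   # -> 0 hello world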
Safe atomic write to filesystem by writing to temp file + atomic rename
def atomic_write(path: Union[Path, str], contents: Union[dict, str, bytes], overwrite: bool=True) -> None: """Safe atomic write to filesystem by writing to temp file + atomic rename""" mode = 'wb+' if isinstance(contents, bytes) else 'w' encoding = None if isinstance(contents, bytes) else 'utf-8' # enforce utf-8 on all text writes # print('\n> Atomic Write:', mode, path, len(contents), f'overwrite={overwrite}') try: with lib_atomic_write(path, mode=mode, overwrite=overwrite, encoding=encoding) as f: if isinstance(contents, dict): dump(contents, f, indent=4, sort_keys=True, cls=ExtendedEncoder) elif isinstance(contents, (bytes, str)): f.write(contents) except OSError as e: if ENFORCE_ATOMIC_WRITES: print(f"[X] OSError: Failed to write {path} with fcntl.F_FULLFSYNC. ({e})") print(" You can store the archive/ subfolder on a hard drive or network share that doesn't support synchronous writes,") print(" but the main folder containing the index.sqlite3 and ArchiveBox.conf files must be on a filesystem that supports FSYNC.") raise SystemExit(1) # retry the write without forcing FSYNC (aka atomic mode) with open(path, mode=mode, encoding=encoding) as f: if isinstance(contents, dict): dump(contents, f, indent=4, sort_keys=True, cls=ExtendedEncoder) elif isinstance(contents, (bytes, str)): f.write(contents) # set file permissions os.chmod(path, int(OUTPUT_PERMISSIONS, base=8))
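# Usage sketch (illustrative, hypothetical paths): dict contents are serialized to JSON, while
# str/bytes are written as-is; the data goes to a temp file first and is atomically renamed into
# place, then chmod'ed to OUTPUT_PERMISSIONS.
atomic_write('./headers.json', {'Status-Code': 200, 'Content-Type': 'text/html'})
atomic_write('./notes.txt', 'plain text contents\n')
atomic_write('./favicon.ico', b'\x00\x00\x01\x00')   # bytes trigger binary mode with no encoding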
chmod -R <permissions> <cwd>/<path>
def chmod_file(path: str, cwd: str='.') -> None: """chmod -R <permissions> <cwd>/<path>""" root = Path(cwd) / path if not root.exists(): raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path)) if not root.is_dir(): # path is just a plain file os.chmod(root, int(OUTPUT_PERMISSIONS, base=8)) else: for subpath in root.glob('**/*'): # walk under <cwd>/<path>, not the process's current directory if subpath.is_dir(): # directories need execute permissions to be able to list contents os.chmod(subpath, int(DIR_OUTPUT_PERMISSIONS, base=8)) else: os.chmod(subpath, int(OUTPUT_PERMISSIONS, base=8))
copy a given file or directory to a given path, overwriting the destination
def copy_and_overwrite(from_path: Union[str, Path], to_path: Union[str, Path]): """copy a given file or directory to a given path, overwriting the destination""" if Path(from_path).is_dir(): shutil.rmtree(to_path, ignore_errors=True) shutil.copytree(from_path, to_path) else: with open(from_path, 'rb') as src: contents = src.read() atomic_write(to_path, contents)
get the total disk size of a given directory, optionally summing up recursively and limiting to entries matching a given filter pattern
def get_dir_size(path: Union[str, Path], recursive: bool=True, pattern: Optional[str]=None) -> Tuple[int, int, int]: """get the total disk size of a given directory, optionally summing up recursively and limiting to a given filter list """ num_bytes, num_dirs, num_files = 0, 0, 0 try: for entry in os.scandir(path): if (pattern is not None) and (pattern not in entry.path): continue if entry.is_dir(follow_symlinks=False): if not recursive: continue num_dirs += 1 bytes_inside, dirs_inside, files_inside = get_dir_size(entry.path) num_bytes += bytes_inside num_dirs += dirs_inside num_files += files_inside else: num_bytes += entry.stat(follow_symlinks=False).st_size num_files += 1 except OSError: # e.g. FileNameTooLong or other error while trying to read dir pass return num_bytes, num_dirs, num_files
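# Usage sketch (illustrative, hypothetical path): pattern filters top-level entries by substring
# match on their path; everything nested below a matching directory is counted in the totals.
num_bytes, num_dirs, num_files = get_dir_size('./archive', recursive=True, pattern=None)
print(f'{num_files} files in {num_dirs} dirs ({num_bytes / 1024 / 1024:.1f} MB)')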
check that all parentheses in a string are balanced and nested properly
def parens_are_matched(string: str, open_char='(', close_char=')'): """check that all parentheses in a string are balanced and nested properly""" count = 0 for c in string: if c == open_char: count += 1 elif c == close_char: count -= 1 if count < 0: return False return count == 0
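# Quick illustration of the balance check (used by fix_url_from_markdown() below):
parens_are_matched('https://wiki.example.com/Some_Page_(Disambiguation)')        # True
parens_are_matched('https://wiki.example.com/Some_Page_(Disambiguation)).html')  # False, dangling ")"
parens_are_matched(')(')                                                         # False, closes before it opens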
cleanup a regex-parsed url that may contain dangling trailing parens from markdown link syntax helpful to fix URLs parsed from markdown e.g. input: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).somemoretext result: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def IMPORTANT ASSUMPTION: valid urls won't have unbalanced or incorrectly nested parentheses e.g. this will fail if the user actually wants to ingest a url like 'https://example.com/some_wei)(rd_url' in that case it will return https://example.com/some_wei (truncated up to the first unbalanced paren) This assumption is true 99.9999% of the time, and for the rare edge case the user can use the url_list parser.
def fix_url_from_markdown(url_str: str) -> str: """ cleanup a regex-parsed url that may contain dangling trailing parens from markdown link syntax helpful to fix URLs parsed from markdown e.g. input: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).somemoretext result: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def IMPORTANT ASSUMPTION: valid urls won't have unbalanced or incorrectly nested parentheses e.g. this will fail if the user actually wants to ingest a url like 'https://example.com/some_wei)(rd_url' in that case it will return https://example.com/some_wei (truncated up to the first unbalanced paren) This assumption is true 99.9999% of the time, and for the rare edge case the user can use the url_list parser. """ trimmed_url = url_str # cut off one trailing character at a time # until parens are balanced e.g. /a(b)c).x(y)z -> /a(b)c while not parens_are_matched(trimmed_url): trimmed_url = trimmed_url[:-1] # make sure trimmed url is still valid if re.findall(URL_REGEX, trimmed_url): return trimmed_url return url_str
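# Example (illustrative; assumes URL_REGEX, defined/imported alongside this helper, matches ordinary https:// urls):
fix_url_from_markdown('https://wikipedia.org/wiki/Example_(disambiguation)).more_text')
# -> 'https://wikipedia.org/wiki/Example_(disambiguation)'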
Enforce function arg and kwarg types at runtime using its python3 type hints
def enforce_types(func): """ Enforce function arg and kwarg types at runtime using its python3 type hints """ # TODO: check return type as well @wraps(func) def typechecked_function(*args, **kwargs): sig = signature(func) def check_argument_type(arg_key, arg_val): try: annotation = sig.parameters[arg_key].annotation except KeyError: annotation = None if annotation is not None and annotation.__class__ is type: if not isinstance(arg_val, annotation): raise TypeError( '{}(..., {}: {}) got unexpected {} argument {}={}'.format( func.__name__, arg_key, annotation.__name__, type(arg_val).__name__, arg_key, str(arg_val)[:64], ) ) # check args for arg_val, arg_key in zip(args, sig.parameters): check_argument_type(arg_key, arg_val) # check kwargs for arg_key, arg_val in kwargs.items(): check_argument_type(arg_key, arg_val) return func(*args, **kwargs) return typechecked_function
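# Usage sketch (illustrative, hypothetical function): only plain class annotations like int/str are
# checked at call time; parameterized generics (Optional[str], List[int], ...) are skipped because
# their annotation.__class__ is not type.
@enforce_types
def add_tag(snapshot_id: int, tag: str) -> str:
    return f'{snapshot_id}:{tag}'

add_tag(1, 'docs')      # ok
# add_tag('1', 'docs')  # raises TypeError: add_tag(..., snapshot_id: int) got unexpected str argument snapshot_id=1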
attach the given docstring to the decorated function
def docstring(text: Optional[str]): """attach the given docstring to the decorated function""" def decorator(func): if text: func.__doc__ = text return func return decorator
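# Usage sketch (illustrative, hypothetical function): handy for attaching help text defined
# elsewhere (e.g. a subcommand description constant) to a function at definition time.
@docstring('Add a new URL or list of URLs to your archive')
def example_main(args=None, stdin=None, pwd=None):
    ...

assert example_main.__doc__ == 'Add a new URL or list of URLs to your archive'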
(<abc>12345</def>, <abc>, </def>) -> 12345
def str_between(string: str, start: str, end: str=None) -> str: """(<abc>12345</def>, <abc>, </def>) -> 12345""" content = string.split(start, 1)[-1] if end is not None: content = content.rsplit(end, 1)[0] return content
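# Quick illustration: returns the substring after the first `start` and before the last `end`.
str_between('<title>ArchiveBox</title>', '<title>', '</title>')  # -> 'ArchiveBox'
str_between('key=value', 'key=')                                 # -> 'value' (no end marker given)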
Parse unix timestamps, iso format, and human-readable strings
def parse_date(date: Any) -> Optional[datetime]: """Parse unix timestamps, iso format, and human-readable strings""" if date is None: return None if isinstance(date, datetime): if date.tzinfo is None: return date.replace(tzinfo=timezone.utc) assert date.tzinfo.utcoffset(datetime.now()).seconds == 0, 'Refusing to load a non-UTC date!' return date if isinstance(date, (float, int)): date = str(date) if isinstance(date, str): return dateparser(date, settings={'TIMEZONE': 'UTC'}).replace(tzinfo=timezone.utc) raise ValueError('Tried to parse invalid date! {}'.format(date))
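# Usage sketch (illustrative; assumes the dateparser library accepts these inputs):
parse_date(None)                          # -> None
parse_date('2024-01-31T12:00:00')         # ISO-ish string  -> datetime pinned to UTC
parse_date(1706702400)                    # unix timestamp  -> datetime pinned to UTC
parse_date(datetime(2024, 1, 31, 12, 0))  # naive datetime  -> same moment with tzinfo=timezone.utc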
Download the contents of a remote url and return the text
def download_url(url: str, timeout: int=None) -> str: """Download the contents of a remote url and return the text""" from .config import ( TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT, COOKIES_FILE, ) timeout = timeout or TIMEOUT session = requests.Session() if COOKIES_FILE and Path(COOKIES_FILE).is_file(): cookie_jar = http.cookiejar.MozillaCookieJar(COOKIES_FILE) cookie_jar.load(ignore_discard=True, ignore_expires=True) for cookie in cookie_jar: session.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path) response = session.get( url, headers={'User-Agent': WGET_USER_AGENT}, verify=CHECK_SSL_VALIDITY, timeout=timeout, ) content_type = response.headers.get('Content-Type', '') encoding = http_content_type_encoding(content_type) or html_body_declared_encoding(response.text) if encoding is not None: response.encoding = encoding try: return response.text except UnicodeDecodeError: # if response is non-text (e.g. image or other binary files), just return the filename instead return url.rsplit('/', 1)[-1]
Download the contents of a remote url and return the headers
def get_headers(url: str, timeout: int=None) -> str: """Download the contents of a remote url and return the headers""" from .config import TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT timeout = timeout or TIMEOUT try: response = requests.head( url, headers={'User-Agent': WGET_USER_AGENT}, verify=CHECK_SSL_VALIDITY, timeout=timeout, allow_redirects=True, ) if response.status_code >= 400: raise RequestException except ReadTimeout: raise except RequestException: response = requests.get( url, headers={'User-Agent': WGET_USER_AGENT}, verify=CHECK_SSL_VALIDITY, timeout=timeout, stream=True ) return pyjson.dumps( { 'Status-Code': response.status_code, **dict(response.headers), }, indent=4, )
helper to build up a chrome shell command with arguments
def chrome_args(**options) -> List[str]: """helper to build up a chrome shell command with arguments""" # Chrome CLI flag documentation: https://peter.sh/experiments/chromium-command-line-switches/ from .config import ( CHROME_OPTIONS, CHROME_VERSION, CHROME_EXTRA_ARGS, ) options = {**CHROME_OPTIONS, **options} if not options['CHROME_BINARY']: raise Exception('Could not find any CHROME_BINARY installed on your system') cmd_args = [options['CHROME_BINARY']] cmd_args += CHROME_EXTRA_ARGS if options['CHROME_HEADLESS']: chrome_major_version = int(re.search(r'\s(\d+)\.\d', CHROME_VERSION)[1]) if chrome_major_version >= 111: cmd_args += ("--headless=new",) else: cmd_args += ('--headless',) if not options['CHROME_SANDBOX']: # assume this means we are running inside a docker container # in docker, GPU support is limited, sandboxing is unecessary, # and SHM is limited to 64MB by default (which is too low to be usable). cmd_args += ( "--no-sandbox", "--no-zygote", "--disable-dev-shm-usage", "--disable-software-rasterizer", "--run-all-compositor-stages-before-draw", "--hide-scrollbars", "--autoplay-policy=no-user-gesture-required", "--no-first-run", "--use-fake-ui-for-media-stream", "--use-fake-device-for-media-stream", "--disable-sync", # "--password-store=basic", ) # disable automatic updating when running headless, as there's no user to see the upgrade prompts cmd_args += ("--simulate-outdated-no-au='Tue, 31 Dec 2099 23:59:59 GMT'",) # set window size for screenshot/pdf/etc. rendering cmd_args += ('--window-size={}'.format(options['RESOLUTION']),) if not options['CHECK_SSL_VALIDITY']: cmd_args += ('--disable-web-security', '--ignore-certificate-errors') if options['CHROME_USER_AGENT']: cmd_args += ('--user-agent={}'.format(options['CHROME_USER_AGENT']),) if options['CHROME_TIMEOUT']: cmd_args += ('--timeout={}'.format(options['CHROME_TIMEOUT'] * 1000),) if options['CHROME_USER_DATA_DIR']: cmd_args.append('--user-data-dir={}'.format(options['CHROME_USER_DATA_DIR'])) cmd_args.append('--profile-directory=Default') return dedupe(cmd_args)
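# Usage sketch (illustrative): the option keys come from CHROME_OPTIONS in the config module, and
# any key passed as a kwarg overrides the configured value, e.g. CHROME_TIMEOUT=0 drops the
# --timeout flag entirely (this is how save_singlefile() below builds its --browser-args).
cmd = chrome_args(CHROME_TIMEOUT=0)
print(' '.join(cmd))   # e.g. /usr/bin/chromium --headless=new --window-size=1440,2000 ...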
Cleans up any state or runtime files that chrome leaves behind when killed by a timeout or other error
def chrome_cleanup(): """ Cleans up any state or runtime files that chrome leaves behind when killed by a timeout or other error """ from .config import IN_DOCKER if IN_DOCKER and lexists("/home/archivebox/.config/chromium/SingletonLock"): remove_file("/home/archivebox/.config/chromium/SingletonLock")
Based on: https://stackoverflow.com/questions/19212665/python-converting-ansi-color-codes-to-html
def ansi_to_html(text: str) -> str: """ Based on: https://stackoverflow.com/questions/19212665/python-converting-ansi-color-codes-to-html """ from .config import COLOR_DICT TEMPLATE = '<span style="color: rgb{}"><br>' text = text.replace('[m', '</span>') def single_sub(match): argsdict = match.groupdict() if argsdict['arg_3'] is None: if argsdict['arg_2'] is None: _, color = 0, argsdict['arg_1'] else: _, color = argsdict['arg_1'], argsdict['arg_2'] else: _, color = argsdict['arg_3'], argsdict['arg_2'] return TEMPLATE.format(COLOR_DICT[color][0]) return COLOR_REGEX.sub(single_sub, text)
Deduplicates the given options. Options that come later clobber earlier conflicting options.
def dedupe(options: List[str]) -> List[str]: """ Deduplicates the given options. Options that come later clobber earlier conflicting options. """ deduped = {} for option in options: deduped[option.split('=')[0]] = option return list(deduped.values())
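# Quick illustration: options are keyed on the part before "=", so a later flag replaces an earlier
# conflicting one while keeping the position where the key first appeared.
dedupe(['--window-size=1440,2000', '--timeout=60000', '--window-size=1920,1080'])
# -> ['--window-size=1920,1080', '--timeout=60000']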
Given an API token string, check if a corresponding non-expired APIToken exists, and return its user
def auth_using_token(token, request: Optional[HttpRequest]=None) -> Optional[AbstractBaseUser]: """Given an API token string, check if a corresponding non-expired APIToken exists, and return its user""" from api.models import APIToken # lazy import model to avoid loading it at urls.py import time user = None submitted_empty_form = token in ('string', '', None) if submitted_empty_form: user = request.user # see if user is authed via django session and use that as the default else: try: token = APIToken.objects.get(token=token) if token.is_valid(): user = token.user except APIToken.DoesNotExist: pass if not user: print('[❌] Failed to authenticate API user using API Key:', request) return user
Given a username and password, check if they are valid and return the corresponding user
def auth_using_password(username, password, request: Optional[HttpRequest]=None) -> Optional[AbstractBaseUser]: """Given a username and password, check if they are valid and return the corresponding user""" user = None submitted_empty_form = (username, password) in (('string', 'string'), ('', ''), (None, None)) if submitted_empty_form: user = request.user # see if user is authed via django session and use that as the default else: user = authenticate( username=username, password=password, ) if not user: print('[❌] Failed to authenticate API user using username & password:', request) return user
find and import all valid archivebox_<subcommand>.py files in CLI_DIR
def list_subcommands() -> Dict[str, str]: """find and import all valid archivebox_<subcommand>.py files in CLI_DIR""" COMMANDS = [] for filename in os.listdir(CLI_DIR): if is_cli_module(filename): subcommand = filename.replace('archivebox_', '').replace('.py', '') module = import_module('.archivebox_{}'.format(subcommand), __package__) assert is_valid_cli_module(module, subcommand) COMMANDS.append((subcommand, module.main.__doc__)) globals()[subcommand] = module.main display_order = lambda cmd: ( display_first.index(cmd[0]) if cmd[0] in display_first else 100 + len(cmd[0]) ) return dict(sorted(COMMANDS, key=display_order))
Run a given ArchiveBox subcommand with the given list of args
def run_subcommand(subcommand: str, subcommand_args: List[str]=None, stdin: Optional[IO]=None, pwd: Union[Path, str, None]=None) -> None: """Run a given ArchiveBox subcommand with the given list of args""" subcommand_args = subcommand_args or [] if subcommand not in meta_cmds: from ..config import setup_django cmd_requires_db = subcommand in archive_cmds init_pending = '--init' in subcommand_args or '--quick-init' in subcommand_args if cmd_requires_db: check_data_folder(pwd) setup_django(in_memory_db=subcommand in fake_db, check_db=cmd_requires_db and not init_pending) if cmd_requires_db: check_migrations() module = import_module('.archivebox_{}'.format(subcommand), __package__) module.main(args=subcommand_args, stdin=stdin, pwd=pwd)
Monkey patched result
def result_list(cl): """ Monkey patched result """ num_sorted_fields = 0 return { 'cl': cl, 'num_sorted_fields': num_sorted_fields, 'results': cl.result_list, }
submit site to archive.org for archiving via their service, save returned archive url
def save_archive_dot_org(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """submit site to archive.org for archiving via their service, save returned archive url""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'archive.org.txt' archive_org_url = None submit_url = 'https://web.archive.org/save/{}'.format(link.url) # later options take precedence options = [ *CURL_ARGS, *CURL_EXTRA_ARGS, '--head', '--max-time', str(timeout), *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []), *([] if CHECK_SSL_VALIDITY else ['--insecure']), ] cmd = [ CURL_BINARY, *dedupe(options), submit_url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(out_dir), timeout=timeout) content_location, errors = parse_archive_dot_org_response(result.stdout) if content_location: archive_org_url = content_location[0] elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]: archive_org_url = None # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link.url))) elif errors: raise ArchiveError(', '.join(errors)) else: raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.') except Exception as err: status = 'failed' output = err finally: timer.end() if output and not isinstance(output, Exception): # instead of writing None when archive.org rejects the url write the # url to resubmit it to archive.org. This is so when the user visits # the URL in person, it will attempt to re-archive it, and it'll show the # nicer error message explaining why the url was rejected if it fails. archive_org_url = archive_org_url or submit_url with open(str(out_dir / output), 'w', encoding='utf-8') as f: f.write(archive_org_url) chmod_file('archive.org.txt', cwd=str(out_dir)) output = archive_org_url return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CURL_VERSION, output=output, status=status, **timer.stats, )
print HTML of site to file using chrome --dump-dom
def save_dom(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """print HTML of site to file using chrome --dump-dom""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'output.html' output_path = out_dir / output cmd = [ *chrome_args(), '--dump-dom', link.url ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(out_dir), timeout=timeout) atomic_write(output_path, result.stdout) if result.returncode: hints = result.stderr.decode() raise ArchiveError('Failed to save DOM', hints) chmod_file(output, cwd=str(out_dir)) except Exception as err: status = 'failed' output = err chrome_cleanup() finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CHROME_VERSION, output=output, status=status, **timer.stats, )
download site favicon from google's favicon api
def save_favicon(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download site favicon from google's favicon api""" out_dir = out_dir or link.link_dir output: ArchiveOutput = 'favicon.ico' # later options take precedence options = [ *CURL_ARGS, *CURL_EXTRA_ARGS, '--max-time', str(timeout), '--output', str(output), *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []), *([] if CHECK_SSL_VALIDITY else ['--insecure']), ] cmd = [ CURL_BINARY, *dedupe(options), FAVICON_PROVIDER.format(domain(link.url)), ] status = 'failed' timer = TimedProgress(timeout, prefix=' ') try: run(cmd, cwd=str(out_dir), timeout=timeout) chmod_file(output, cwd=str(out_dir)) status = 'succeeded' except Exception as err: output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CURL_VERSION, output=output, status=status, **timer.stats, )
download full site using git
def save_git(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download full site using git""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'git' output_path = out_dir / output output_path.mkdir(exist_ok=True) cmd = [ GIT_BINARY, 'clone', *GIT_ARGS, *([] if CHECK_SSL_VALIDITY else ['-c', 'http.sslVerify=false']), without_query(without_fragment(link.url)), ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(output_path), timeout=timeout + 1) if result.returncode == 128: # ignore failed re-download when the folder already exists pass elif result.returncode > 0: hints = 'Got git response code: {}.'.format(result.returncode) raise ArchiveError('Failed to save git clone', hints) chmod_file(output, cwd=str(out_dir)) except Exception as err: status = 'failed' output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=GIT_VERSION, output=output, status=status, **timer.stats, )
Download site headers
def save_headers(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult: """Download site headers""" out_dir = Path(out_dir or link.link_dir) output_folder = out_dir.absolute() output: ArchiveOutput = 'headers.json' status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') # later options take precedence options = [ *CURL_ARGS, *CURL_EXTRA_ARGS, '--head', '--max-time', str(timeout), *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []), *([] if CHECK_SSL_VALIDITY else ['--insecure']), ] cmd = [ CURL_BINARY, *dedupe(options), link.url, ] try: json_headers = get_headers(link.url, timeout=timeout) output_folder.mkdir(exist_ok=True) atomic_write(str(output_folder / "headers.json"), json_headers) except (Exception, OSError) as err: status = 'failed' output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CURL_VERSION, output=output, status=status, **timer.stats, )
extract search-indexing-friendly text from an HTML document
def save_htmltotext(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """extract search-indexing-friendly text from an HTML document""" out_dir = Path(out_dir or link.link_dir) output = "htmltotext.txt" cmd = ['(internal) archivebox.extractors.htmltotext', './{singlefile,dom}.html'] timer = TimedProgress(timeout, prefix=' ') extracted_text = None status = 'failed' try: extractor = HTMLTextExtractor() document = get_html(link, out_dir) if not document: raise ArchiveError('htmltotext could not find HTML to parse for article text') extractor.feed(document) extractor.close() extracted_text = str(extractor) atomic_write(str(out_dir / output), extracted_text) status = 'succeeded' except (Exception, OSError) as err: output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=VERSION, output=output, status=status, index_texts=[extracted_text] if extracted_text else [], **timer.stats, )
Download playlists or individual video, audio, and subtitles using youtube-dl or yt-dlp
def save_media(link: Link, out_dir: Optional[Path]=None, timeout: int=MEDIA_TIMEOUT) -> ArchiveResult: """Download playlists or individual video, audio, and subtitles using youtube-dl or yt-dlp""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'media' output_path = out_dir / output output_path.mkdir(exist_ok=True) # later options take precedence options = [ *YOUTUBEDL_ARGS, *YOUTUBEDL_EXTRA_ARGS, *([] if CHECK_SSL_VALIDITY else ['--no-check-certificate']), # TODO: add --cookies-from-browser={CHROME_USER_DATA_DIR} ] cmd = [ YOUTUBEDL_BINARY, *dedupe(options), link.url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(output_path), timeout=timeout + 1) chmod_file(output, cwd=str(out_dir)) if result.returncode: if (b'ERROR: Unsupported URL' in result.stderr or b'HTTP Error 404' in result.stderr or b'HTTP Error 403' in result.stderr or b'URL could be a direct video link' in result.stderr or b'Unable to extract container ID' in result.stderr): # These happen too frequently on non-media pages to warrant printing to console pass else: hints = ( 'Got youtube-dl (or yt-dlp) response code: {}.'.format(result.returncode), *result.stderr.decode().split('\n'), ) raise ArchiveError('Failed to save media', hints) except Exception as err: status = 'failed' output = err finally: timer.end() # add video description and subtitles to full-text index # Let's try a few different index_texts = [ # errors: # * 'strict' to raise a ValueError exception if there is an # encoding error. The default value of None has the same effect. # * 'ignore' ignores errors. Note that ignoring encoding errors # can lead to data loss. # * 'xmlcharrefreplace' is only supported when writing to a # file. Characters not supported by the encoding are replaced with # the appropriate XML character reference &#nnn;. # There are a few more options described in https://docs.python.org/3/library/functions.html#open text_file.read_text(encoding='utf-8', errors='xmlcharrefreplace').strip() for text_file in ( *output_path.glob('*.description'), *output_path.glob('*.srt'), *output_path.glob('*.vtt'), *output_path.glob('*.lrc'), *output_path.glob('*.lrc'), ) ] return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=YOUTUBEDL_VERSION, output=output, status=status, index_texts=index_texts, **timer.stats, )
download reader friendly version using @postlight/mercury-parser
def save_mercury(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download reader friendly version using @postlight/mercury-parser""" out_dir = Path(out_dir or link.link_dir) output_folder = out_dir.absolute() / "mercury" output = "mercury" status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: output_folder.mkdir(exist_ok=True) # later options take precedence options = [ *MERCURY_ARGS, *MERCURY_EXTRA_ARGS, ] # By default, get plain text version of article cmd = [ DEPENDENCIES['MERCURY_BINARY']['path'], link.url, *dedupe(options) ] result = run(cmd, cwd=out_dir, timeout=timeout) try: article_text = json.loads(result.stdout) except json.JSONDecodeError: raise ShellError(cmd, result) if article_text.get('failed'): raise ArchiveError('Mercury was not able to get article text from the URL') atomic_write(str(output_folder / "content.txt"), article_text["content"]) # Get HTML version of article cmd = [ DEPENDENCIES['MERCURY_BINARY']['path'], link.url ] result = run(cmd, cwd=out_dir, timeout=timeout) try: article_json = json.loads(result.stdout) except json.JSONDecodeError: raise ShellError(cmd, result) if article_text.get('failed'): raise ArchiveError('Mercury was not able to get article HTML from the URL') atomic_write(str(output_folder / "content.html"), article_json.pop("content")) atomic_write(str(output_folder / "article.json"), article_json) # Check for common failure cases if (result.returncode > 0): raise ShellError(cmd, result) except (ArchiveError, Exception, OSError) as err: status = 'failed' output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=MERCURY_VERSION, output=output, status=status, **timer.stats, )
print PDF of site to file using chrome --headless
def save_pdf(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """print PDF of site to file using chrome --headless""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'output.pdf' cmd = [ *chrome_args(), '--print-to-pdf', link.url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(out_dir), timeout=timeout) if result.returncode: hints = (result.stderr or result.stdout).decode() raise ArchiveError('Failed to save PDF', hints) chmod_file('output.pdf', cwd=str(out_dir)) except Exception as err: status = 'failed' output = err chrome_cleanup() finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CHROME_VERSION, output=output, status=status, **timer.stats, )
download reader friendly version using @mozilla/readability
def save_readability(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download reader friendly version using @mozilla/readability""" out_dir = Path(out_dir or link.link_dir) output_folder = out_dir.absolute() / "readability" output = "readability" # Readability Docs: https://github.com/mozilla/readability status = 'succeeded' # fake command to show the user so they have something to try debugging if get_html fails cmd = [ CURL_BINARY, link.url ] readability_content = None timer = TimedProgress(timeout, prefix=' ') try: document = get_html(link, out_dir) temp_doc = NamedTemporaryFile(delete=False) temp_doc.write(document.encode("utf-8")) temp_doc.close() if not document or len(document) < 10: raise ArchiveError('Readability could not find HTML to parse for article text') cmd = [ DEPENDENCIES['READABILITY_BINARY']['path'], temp_doc.name, link.url, ] result = run(cmd, cwd=out_dir, timeout=timeout) try: result_json = json.loads(result.stdout) assert result_json and 'content' in result_json, 'Readability output is not valid JSON' except json.JSONDecodeError: raise ArchiveError('Readability was not able to archive the page (invalid JSON)', result.stdout + result.stderr) output_folder.mkdir(exist_ok=True) readability_content = result_json.pop("textContent") atomic_write(str(output_folder / "content.html"), result_json.pop("content")) atomic_write(str(output_folder / "content.txt"), readability_content) atomic_write(str(output_folder / "article.json"), result_json) output_tail = [ line.strip() for line in (result.stdout + result.stderr).decode().rsplit('\n', 5)[-5:] if line.strip() ] hints = ( 'Got readability response code: {}.'.format(result.returncode), *output_tail, ) # Check for common failure cases if (result.returncode > 0): raise ArchiveError(f'Readability was not able to archive the page (status={result.returncode})', hints) except (Exception, OSError) as err: status = 'failed' output = err # prefer Chrome dom output to singlefile because singlefile often contains huge url(data:image/...base64) strings that make the html too long to parse with readability cmd = [cmd[0], './{dom,singlefile}.html'] finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=READABILITY_VERSION, output=output, status=status, index_texts=[readability_content] if readability_content else [], **timer.stats, )
take screenshot of site using chrome --headless
def save_screenshot(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """take screenshot of site using chrome --headless""" out_dir = out_dir or Path(link.link_dir) output: ArchiveOutput = 'screenshot.png' cmd = [ *chrome_args(), '--screenshot', link.url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(out_dir), timeout=timeout) if result.returncode: hints = (result.stderr or result.stdout).decode() raise ArchiveError('Failed to save screenshot', hints) chmod_file(output, cwd=str(out_dir)) except Exception as err: status = 'failed' output = err chrome_cleanup() finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CHROME_VERSION, output=output, status=status, **timer.stats, )
download full site using single-file
def save_singlefile(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download full site using single-file""" out_dir = out_dir or Path(link.link_dir) output = "singlefile.html" browser_args = chrome_args(CHROME_TIMEOUT=0) # SingleFile CLI Docs: https://github.com/gildas-lormeau/SingleFile/tree/master/cli browser_args = '--browser-args={}'.format(json.dumps(browser_args[1:])) # later options take precedence options = [ '--browser-executable-path={}'.format(CHROME_BINARY), *(["--browser-cookies-file={}".format(COOKIES_FILE)] if COOKIES_FILE else []), browser_args, *SINGLEFILE_ARGS, *SINGLEFILE_EXTRA_ARGS, ] cmd = [ DEPENDENCIES['SINGLEFILE_BINARY']['path'], *dedupe(options), link.url, output, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') result = None try: result = run(cmd, cwd=str(out_dir), timeout=timeout) # parse out number of files downloaded from last line of stderr: # "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)" output_tail = [ line.strip() for line in (result.stdout + result.stderr).decode().rsplit('\n', 5)[-5:] if line.strip() ] hints = ( 'Got single-file response code: {}.'.format(result.returncode), *output_tail, ) # Check for common failure cases if (result.returncode > 0) or not (out_dir / output).is_file(): raise ArchiveError(f'SingleFile was not able to archive the page (status={result.returncode})', hints) chmod_file(output, cwd=str(out_dir)) except (Exception, OSError) as err: status = 'failed' # TODO: Make this prettier. This is necessary to run the command (escape JSON internal quotes). cmd[2] = browser_args.replace('"', "\\\"") err.hints = (result.stdout + result.stderr).decode().split('\n') output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=SINGLEFILE_VERSION, output=output, status=status, **timer.stats, )
Try to find dom, singlefile and then wget files (in that order). If none is found, download the url again.
def get_html(link: Link, path: Path, timeout: int=TIMEOUT) -> str: """ Try to find dom, singlefile and then wget files (in that order). If none is found, download the url again. """ canonical = link.canonical_outputs() abs_path = path.absolute() # prefer chrome-generated DOM dump to singlefile as singlefile output often includes HUGE url(data:image/...base64) strings that crash parsers sources = [canonical["dom_path"], canonical["singlefile_path"], canonical["wget_path"]] document = None for source in sources: try: with open(abs_path / source, "r", encoding="utf-8") as f: document = f.read() break except (FileNotFoundError, TypeError, UnicodeDecodeError): continue if document is None: return download_url(link.url, timeout=timeout) else: return document
try to guess the page's title from its content
def save_title(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """try to guess the page's title from its content""" from core.models import Snapshot output: ArchiveOutput = None # later options take precedence options = [ *CURL_ARGS, *CURL_EXTRA_ARGS, '--max-time', str(timeout), *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []), *([] if CHECK_SSL_VALIDITY else ['--insecure']), ] cmd = [ CURL_BINARY, *dedupe(options), link.url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: html = get_html(link, out_dir, timeout=timeout) try: # try using relatively strict html parser first parser = TitleParser() parser.feed(html) output = parser.title if output is None: raise except Exception: # fallback to regex that can handle broken/malformed html output = extract_title_with_regex(html) # if title is better than the one in the db, update db with new title if isinstance(output, str) and output: if not link.title or len(output) >= len(link.title): Snapshot.objects.filter(url=link.url, timestamp=link.timestamp)\ .update(title=output) else: # if no content was returned, dont save a title (because it might be a temporary error) if not html: raise ArchiveError('Unable to detect page title') # output = html[:128] # use first bit of content as the title output = link.base_url # use the filename as the title (better UX) except Exception as err: status = 'failed' output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=CURL_VERSION, output=output, status=status, **timer.stats, )
download full site using wget
def save_wget(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download full site using wget""" out_dir = out_dir or link.link_dir if SAVE_WARC: warc_dir = out_dir / "warc" warc_dir.mkdir(exist_ok=True) warc_path = warc_dir / str(int(datetime.now(timezone.utc).timestamp())) # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html output: ArchiveOutput = None # later options take precedence options = [ *WGET_ARGS, *WGET_EXTRA_ARGS, '--timeout={}'.format(timeout), *(['--restrict-file-names={}'.format(RESTRICT_FILE_NAMES)] if RESTRICT_FILE_NAMES else []), *(['--warc-file={}'.format(str(warc_path))] if SAVE_WARC else []), *(['--page-requisites'] if SAVE_WGET_REQUISITES else []), *(['--user-agent={}'.format(WGET_USER_AGENT)] if WGET_USER_AGENT else []), *(['--load-cookies', str(COOKIES_FILE)] if COOKIES_FILE else []), *(['--compression=auto'] if WGET_AUTO_COMPRESSION else []), *([] if SAVE_WARC else ['--timestamping']), *([] if CHECK_SSL_VALIDITY else ['--no-check-certificate', '--no-hsts']), # '--server-response', # print headers for better error parsing ] cmd = [ WGET_BINARY, *dedupe(options), link.url, ] status = 'succeeded' timer = TimedProgress(timeout, prefix=' ') try: result = run(cmd, cwd=str(out_dir), timeout=timeout) output = wget_output_path(link) # parse out number of files downloaded from last line of stderr: # "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)" output_tail = [ line.strip() for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:] if line.strip() ] files_downloaded = ( int(output_tail[-1].strip().split(' ', 2)[1] or 0) if 'Downloaded:' in output_tail[-1] else 0 ) hints = ( 'Got wget response code: {}.'.format(result.returncode), *output_tail, ) # Check for common failure cases if (result.returncode > 0 and files_downloaded < 1) or output is None: if b'403: Forbidden' in result.stderr: raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints) if b'404: Not Found' in result.stderr: raise ArchiveError('404 Not Found', hints) if b'ERROR 500: Internal Server Error' in result.stderr: raise ArchiveError('500 Internal Server Error', hints) raise ArchiveError('Wget failed or got an error from the server', hints) if (out_dir / output).exists(): chmod_file(output, cwd=str(out_dir)) else: print(f' {out_dir}/{output}') raise ArchiveError('Failed to find wget output after running', hints) except Exception as err: status = 'failed' output = err finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=WGET_VERSION, output=output, status=status, **timer.stats, )
calculate the path to the wgetted .html file, since wget may adjust some paths to be different than the base_url path. See docs on wget --adjust-extension (-E)
def wget_output_path(link: Link) -> Optional[str]: """calculate the path to the wgetted .html file, since wget may adjust some paths to be different than the base_url path. See docs on wget --adjust-extension (-E) """ # Wget downloads can save in a number of different ways depending on the url: # https://example.com # > example.com/index.html # https://example.com?v=zzVa_tX1OiI # > example.com/index.html?v=zzVa_tX1OiI.html # https://www.example.com/?v=zzVa_tX1OiI # > example.com/index.html?v=zzVa_tX1OiI.html # https://example.com/abc # > example.com/abc.html # https://example.com/abc/ # > example.com/abc/index.html # https://example.com/abc?v=zzVa_tX1OiI.html # > example.com/abc?v=zzVa_tX1OiI.html # https://example.com/abc/?v=zzVa_tX1OiI.html # > example.com/abc/index.html?v=zzVa_tX1OiI.html # https://example.com/abc/test.html # > example.com/abc/test.html # https://example.com/abc/test?v=zzVa_tX1OiI # > example.com/abc/test?v=zzVa_tX1OiI.html # https://example.com/abc/test/?v=zzVa_tX1OiI # > example.com/abc/test/index.html?v=zzVa_tX1OiI.html # There's also lots of complexity around how the urlencoding and renaming # is done for pages with query and hash fragments or extensions like shtml / htm / php / etc # Since the wget algorithm for -E (appending .html) is incredibly complex # and there's no way to get the computed output path from wget # in order to avoid having to reverse-engineer how they calculate it, # we just look in the output folder read the filename wget used from the filesystem full_path = without_fragment(without_query(path(link.url))).strip('/') search_dir = Path(link.link_dir) / domain(link.url).replace(":", "+") / urldecode(full_path) for _ in range(4): if search_dir.exists(): if search_dir.is_dir(): html_files = [ f for f in search_dir.iterdir() if re.search(".+\\.[Ss]?[Hh][Tt][Mm][Ll]?$", str(f), re.I | re.M) ] if html_files: return str(html_files[0].relative_to(link.link_dir)) # sometimes wget'd URLs have no ext and return non-html # e.g. /some/example/rss/all -> some RSS XML content) # /some/other/url.o4g -> some binary unrecognized ext) # test this with archivebox add --depth=1 https://getpocket.com/users/nikisweeting/feed/all last_part_of_url = urldecode(full_path.rsplit('/', 1)[-1]) for file_present in search_dir.iterdir(): if file_present == last_part_of_url: return str((search_dir / file_present).relative_to(link.link_dir)) # Move up one directory level search_dir = search_dir.parent if str(search_dir) == link.link_dir: break # check for literally any file present that isnt an empty folder domain_dir = Path(domain(link.url).replace(":", "+")) files_within = list((Path(link.link_dir) / domain_dir).glob('**/*.*')) if files_within: return str((domain_dir / files_within[-1]).relative_to(link.link_dir)) # fallback to just the domain dir search_dir = Path(link.link_dir) / domain(link.url).replace(":", "+") if search_dir.is_dir(): return domain(link.url).replace(":", "+") # fallback to just the domain dir without port search_dir = Path(link.link_dir) / domain(link.url).split(":", 1)[0] if search_dir.is_dir(): return domain(link.url).split(":", 1)[0] return None
download the DOM, PDF, and a screenshot into a folder named after the link's timestamp
def archive_link(link: Link, overwrite: bool=False, methods: Optional[Iterable[str]]=None, out_dir: Optional[Path]=None) -> Link: """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp""" # TODO: Remove when the input is changed to be a snapshot. Suboptimal approach. from core.models import Snapshot, ArchiveResult try: snapshot = Snapshot.objects.get(url=link.url) # TODO: This will be unnecessary once everything is a snapshot except Snapshot.DoesNotExist: snapshot = write_link_to_sql_index(link) active_methods = get_archive_methods_for_link(link) if methods: active_methods = [ method for method in active_methods if method[0] in methods ] out_dir = out_dir or Path(link.link_dir) try: is_new = not Path(out_dir).exists() if is_new: os.makedirs(out_dir) link = load_link_details(link, out_dir=out_dir) write_link_details(link, out_dir=out_dir, skip_sql_index=False) log_link_archiving_started(link, str(out_dir), is_new) link = link.overwrite(updated=datetime.now(timezone.utc)) stats = {'skipped': 0, 'succeeded': 0, 'failed': 0} start_ts = datetime.now(timezone.utc) for method_name, should_run, method_function in active_methods: try: if method_name not in link.history: link.history[method_name] = [] if should_run(link, out_dir, overwrite): log_archive_method_started(method_name) result = method_function(link=link, out_dir=out_dir) link.history[method_name].append(result) stats[result.status] += 1 log_archive_method_finished(result) write_search_index(link=link, texts=result.index_texts) ArchiveResult.objects.create(snapshot=snapshot, extractor=method_name, cmd=result.cmd, cmd_version=result.cmd_version, output=result.output, pwd=result.pwd, start_ts=result.start_ts, end_ts=result.end_ts, status=result.status) # bump the updated time on the main Snapshot here, this is critical # to be able to cache summaries of the ArchiveResults for a given # snapshot without having to load all the results from the DB each time. # (we use {Snapshot.id}-{Snapshot.updated} as the cache key and assume # ArchiveResults are unchanged as long as the updated timestamp is unchanged) snapshot.save() else: # print('{black} X {}{reset}'.format(method_name, **ANSI)) stats['skipped'] += 1 except Exception as e: # https://github.com/ArchiveBox/ArchiveBox/issues/984#issuecomment-1150541627 with open(ERROR_LOG, "a", encoding='utf-8') as f: command = ' '.join(sys.argv) ts = datetime.now(timezone.utc).strftime('%Y-%m-%d__%H:%M:%S') f.write(("\n" + 'Exception in archive_methods.save_{}(Link(url={})) command={}; ts={}'.format( method_name, link.url, command, ts ) + "\n" + str(e) + "\n")) #f.write(f"\n> {command}; ts={ts} version={config['VERSION']} docker={config['IN_DOCKER']} is_tty={config['IS_TTY']}\n") # print(f' ERROR: {method_name} {e.__class__.__name__}: {e} {getattr(e, "hints", "")}', ts, link.url, command) raise Exception('Exception in archive_methods.save_{}(Link(url={}))'.format( method_name, link.url, )) from e # print(' ', stats) try: latest_title = link.history['title'][-1].output.strip() if latest_title and len(latest_title) >= len(link.title or ''): link = link.overwrite(title=latest_title) except Exception: pass write_link_details(link, out_dir=out_dir, skip_sql_index=False) log_link_archiving_finished(link, out_dir, is_new, stats, start_ts) except KeyboardInterrupt: try: write_link_details(link, out_dir=link.link_dir) except: pass raise except Exception as err: print(' ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err)) raise return link
parse an archive index html file and return the list of urls
def parse_html_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[str]: """parse an archive index html file and return the list of urls""" index_path = Path(out_dir) / HTML_INDEX_FILENAME if index_path.exists(): with open(index_path, 'r', encoding='utf-8') as f: for line in f: if 'class="link-url"' in line: yield line.split('"')[1] return ()
render the template for the entire main index
def main_index_template(links: List[Link], template: str=MAIN_INDEX_TEMPLATE) -> str: """render the template for the entire main index""" return render_django_template(template, { 'version': VERSION, 'git_sha': VERSION, # not used anymore, but kept for backwards compatibility 'num_links': str(len(links)), 'date_updated': datetime.now(timezone.utc).strftime('%Y-%m-%d'), 'time_updated': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M'), 'links': [link._asdict(extended=True) for link in links], 'FOOTER_INFO': FOOTER_INFO, })
render a given html template string with the given template content
def render_django_template(template: str, context: Mapping[str, str]) -> str: """render a given html template string with the given template content""" from django.template.loader import render_to_string return render_to_string(template, context)
parse an archive index json file and return the list of links
def parse_json_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]: """parse an archive index json file and return the list of links""" index_path = Path(out_dir) / JSON_INDEX_FILENAME if index_path.exists(): with open(index_path, 'r', encoding='utf-8') as f: try: links = pyjson.load(f)['links'] if links: Link.from_json(links[0]) except Exception as err: print(" {lightyellow}! Found an index.json in the project root but couldn't load links from it: {} {}".format( err.__class__.__name__, err, **ANSI, )) return () for link_json in links: try: yield Link.from_json(link_json) except KeyError: try: detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp'] yield parse_json_link_details(str(detail_index_path)) except KeyError: # as a last effort, try to guess the missing values out of existing ones try: yield Link.from_json(link_json, guess=True) except KeyError: print(" {lightyellow}! Failed to load the index.json from {}".format(detail_index_path, **ANSI)) continue return ()
write a json file with some info about the link
def write_json_link_details(link: Link, out_dir: Optional[str]=None) -> None:
    """write a json file with some info about the link"""

    out_dir = out_dir or link.link_dir
    path = Path(out_dir) / JSON_INDEX_FILENAME
    atomic_write(str(path), link._asdict(extended=True))
load the json link index from a given directory
def parse_json_link_details(out_dir: Union[Path, str], guess: Optional[bool]=False) -> Optional[Link]:
    """load the json link index from a given directory"""
    existing_index = Path(out_dir) / JSON_INDEX_FILENAME
    if existing_index.exists():
        with open(existing_index, 'r', encoding='utf-8') as f:
            try:
                link_json = pyjson.load(f)
                return Link.from_json(link_json, guess)
            except pyjson.JSONDecodeError:
                pass
    return None
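For example, loading the per-snapshot index written by write_json_link_details back out of its data directory (the folder name below is a hypothetical timestamp):

link = parse_json_link_details('archive/1612345678')
if link is not None:
    print(link.url, link.title)
else:
    print('no readable index.json in that folder')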
read through all the archive data folders and return the parsed links
def parse_json_links_details(out_dir: Union[Path, str]) -> Iterator[Link]:
    """read through all the archive data folders and return the parsed links"""

    for entry in os.scandir(Path(out_dir) / ARCHIVE_DIR_NAME):
        if entry.is_dir(follow_symlinks=True):
            if (Path(entry.path) / 'index.json').exists():
                try:
                    link = parse_json_link_details(entry.path)
                except KeyError:
                    link = None
                if link:
                    yield link
deterministically merge two links, favoring longer field values over shorter, and "cleaner" values over worse ones.
def merge_links(a: Link, b: Link) -> Link:
    """deterministically merge two links, favoring longer field values over shorter,
    and "cleaner" values over worse ones.
    """
    assert a.base_url == b.base_url, f'Cannot merge two links with different URLs ({a.base_url} != {b.base_url})'

    # longest url wins (because a fuzzy url will always be shorter)
    url = a.url if len(a.url) > len(b.url) else b.url

    # best title based on length and quality
    possible_titles = [
        title
        for title in (a.title, b.title)
        if title and title.strip() and '://' not in title
    ]
    title = None
    if len(possible_titles) == 2:
        title = max(possible_titles, key=lambda t: len(t))
    elif len(possible_titles) == 1:
        title = possible_titles[0]

    # earliest valid timestamp
    timestamp = (
        a.timestamp
        if float(a.timestamp or 0) < float(b.timestamp or 0) else
        b.timestamp
    )

    # all unique, truthy tags (empty strings from splitting are filtered out)
    tags_set = (
        set(tag.strip() for tag in (a.tags or '').split(',') if tag.strip())
        | set(tag.strip() for tag in (b.tags or '').split(',') if tag.strip())
    )
    tags = ','.join(tags_set) or None

    # all unique source entries
    sources = list(set(a.sources + b.sources))

    # all unique history entries for the combined archive methods
    all_methods = set(list(a.history.keys()) + list(b.history.keys()))
    history = {
        method: (a.history.get(method) or []) + (b.history.get(method) or [])
        for method in all_methods
    }
    for method in all_methods:
        deduped_jsons = {
            pyjson.dumps(result, sort_keys=True, cls=ExtendedEncoder)
            for result in history[method]
        }
        history[method] = list(reversed(sorted(
            (ArchiveResult.from_json(pyjson.loads(result)) for result in deduped_jsons),
            key=lambda result: result.start_ts,
        )))

    return Link(
        url=url,
        timestamp=timestamp,
        title=title,
        tags=tags,
        sources=sources,
        history=history,
    )
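A minimal sketch of the merge behaviour; the keyword fields passed to Link() are an assumption about the schema and may differ between ArchiveBox versions:

older = Link(url='https://example.com/', timestamp='1612345678', title=None,
             tags='docs', sources=['feed.rss'], history={})
newer = Link(url='https://example.com/', timestamp='1612345999', title='Example Domain',
             tags='reference', sources=['import.txt'], history={})

merged = merge_links(older, newer)
assert merged.timestamp == '1612345678'                    # earliest valid timestamp wins
assert merged.title == 'Example Domain'                    # the only non-empty, non-URL title wins
assert set(merged.sources) == {'feed.rss', 'import.txt'}   # sources are unioned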
remove chrome://, about://, or other schemed links that can't be archived
def archivable_links(links: Iterable[Link]) -> Iterable[Link]:
    """remove chrome://, about://, or other schemed links that can't be archived"""
    for link in links:
        try:
            urlparse(link.url)
        except ValueError:
            continue
        if scheme(link.url) not in ('http', 'https', 'ftp'):
            continue
        if URL_DENYLIST_PTN and URL_DENYLIST_PTN.search(link.url):
            continue
        if URL_ALLOWLIST_PTN and (not URL_ALLOWLIST_PTN.search(link.url)):
            continue

        yield link
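archivable_links() only reads link.url, so a tiny stand-in object is enough to illustrate the filtering (this assumes no allowlist/denylist patterns are configured that would exclude the https URL):

from collections import namedtuple

FakeLink = namedtuple('FakeLink', 'url')   # hypothetical stand-in, not the real Link schema
candidates = [
    FakeLink('https://example.com/page'),
    FakeLink('chrome://settings'),
    FakeLink('mailto:[email protected]'),
]
print([link.url for link in archivable_links(candidates)])
# -> ['https://example.com/page']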
deduplicate a list of links by merging all entries that share the same url into one
def fix_duplicate_links(sorted_links: Iterable[Link]) -> Iterable[Link]:
    """
    deduplicate a list of links by merging all entries that share the same url into one
    """
    unique_urls: OrderedDict[str, Link] = OrderedDict()

    for link in sorted_links:
        if link.url in unique_urls:
            # merge with any other links that share the same url
            link = merge_links(unique_urls[link.url], link)
        unique_urls[link.url] = link

    return unique_urls.values()
resolve duplicate timestamps by appending a decimal: 1234, 1234, 1234 -> 1234, 1234.0, 1234.1
def lowest_uniq_timestamp(used_timestamps: OrderedDict, timestamp: str) -> str:
    """resolve duplicate timestamps by appending a decimal: 1234, 1234, 1234 -> 1234, 1234.0, 1234.1"""

    timestamp = timestamp.split('.')[0]
    nonce = 0

    # first try 152323423 before 152323423.0
    if timestamp not in used_timestamps:
        return timestamp

    new_timestamp = '{}.{}'.format(timestamp, nonce)
    while new_timestamp in used_timestamps:
        nonce += 1
        new_timestamp = '{}.{}'.format(timestamp, nonce)

    return new_timestamp
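For example, resolving three imports that all claim the same bookmark timestamp:

from collections import OrderedDict

used = OrderedDict()
for _ in range(3):
    ts = lowest_uniq_timestamp(used, '1234')
    used[ts] = True
    print(ts)
# prints 1234, then 1234.0, then 1234.1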
write the given list of links to the main sqlite3 index
def write_main_index(links: List[Link], out_dir: Path=OUTPUT_DIR) -> None:
    """write the given list of links to the main sqlite3 index"""

    log_indexing_process_started(len(links))

    try:
        with timed_index_update(out_dir / SQL_INDEX_FILENAME):
            write_sql_main_index(links, out_dir=out_dir)
            os.chmod(out_dir / SQL_INDEX_FILENAME, int(OUTPUT_PERMISSIONS, base=8))  # set here because we don't write it with atomic writes

    except (KeyboardInterrupt, SystemExit):
        stderr('[!] Warning: Still writing index to disk...', color='lightyellow')
        stderr('    Run archivebox init to fix any inconsistencies from an ungraceful exit.')
        with timed_index_update(out_dir / SQL_INDEX_FILENAME):
            write_sql_main_index(links, out_dir=out_dir)
            os.chmod(out_dir / SQL_INDEX_FILENAME, int(OUTPUT_PERMISSIONS, base=8))  # set here because we don't write it with atomic writes
        raise SystemExit(0)

    log_indexing_process_finished()
load the main index of all Snapshots from the sqlite3 database
def load_main_index(out_dir: Path=OUTPUT_DIR, warn: bool=True) -> QuerySet:
    """load the main index of all Snapshots from the sqlite3 database"""
    from core.models import Snapshot
    try:
        return Snapshot.objects.all().only('id')
    except (KeyboardInterrupt, SystemExit):
        raise SystemExit(0)
Given a list of in-memory Links, dedupe and merge them with any conflicting Snapshots in the DB.
def fix_duplicate_links_in_index(snapshots: QuerySet, links: Iterable[Link]) -> Iterable[Link]:
    """
    Given a list of in-memory Links, dedupe and merge them with any conflicting Snapshots in the DB.
    """
    unique_urls: OrderedDict[str, Link] = OrderedDict()

    for link in links:
        index_link = snapshots.filter(url=link.url)
        if index_link:
            link = merge_links(index_link[0].as_link(), link)

        unique_urls[link.url] = link

    return unique_urls.values()
Link validation happens at an earlier stage; this method deduplicates the new links against the Snapshots that already exist in the index.
def dedupe_links(snapshots: QuerySet, new_links: List[Link]) -> List[Link]:
    """
    Link validation happens at an earlier stage; this method deduplicates
    the new links against the Snapshots that already exist in the index.
    """
    # merge existing links in out_dir and new links
    dedup_links = fix_duplicate_links_in_index(snapshots, new_links)

    new_links = [
        link for link in new_links
        if not snapshots.filter(url=link.url).exists()
    ]

    dedup_links_dict = {link.url: link for link in dedup_links}

    # replace links in new_links with the deduped/merged version
    for i in range(len(new_links)):
        if new_links[i].url in dedup_links_dict:
            new_links[i] = dedup_links_dict[new_links[i].url]

    log_deduping_finished(len(new_links))

    return new_links
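A hedged sketch of the intended call pattern during an import; `imported_links` is a hypothetical list of Link objects already parsed from the user's input and passed through archivable_links()/fix_duplicate_links():

from core.models import Snapshot

snapshots = Snapshot.objects.all()
new_links = dedupe_links(snapshots, imported_links)   # only links not already in the index survive
print('{} links are new and will be added to the index'.format(len(new_links)))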
check for an existing link archive in the given directory, and load+merge it into the given link dict
def load_link_details(link: Link, out_dir: Optional[str]=None) -> Link:
    """check for an existing link archive in the given directory,
    and load+merge it into the given link dict
    """
    out_dir = out_dir or link.link_dir

    existing_link = parse_json_link_details(out_dir)
    if existing_link:
        return merge_links(existing_link, link)

    return link
indexed links without checking archive status or data directory validity
def get_indexed_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links without checking archive status or data directory validity"""
    links = (snapshot.as_link() for snapshot in snapshots.iterator())
    return {
        link.link_dir: link
        for link in links
    }
indexed links that are archived with a valid data directory
def get_archived_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links that are archived with a valid data directory"""
    links = (snapshot.as_link() for snapshot in snapshots.iterator())
    return {
        link.link_dir: link
        for link in filter(is_archived, links)
    }
indexed links that are unarchived with no data directory or an empty data directory
def get_unarchived_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links that are unarchived with no data directory or an empty data directory"""
    links = (snapshot.as_link() for snapshot in snapshots.iterator())
    return {
        link.link_dir: link
        for link in filter(is_unarchived, links)
    }
dirs that actually exist in the archive/ folder
def get_present_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that actually exist in the archive/ folder"""

    all_folders = {}

    for entry in (out_dir / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            link = None
            try:
                # pass the Path itself: pathlib entries have no .path attribute
                link = parse_json_link_details(entry)
            except Exception:
                pass

            all_folders[entry.name] = link

    return all_folders
dirs with a valid index matched to the main index and archived content
def get_valid_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs with a valid index matched to the main index and archived content"""
    links = [snapshot.as_link_with_details() for snapshot in snapshots.iterator()]
    return {
        link.link_dir: link
        for link in filter(is_valid, links)
    }
dirs that are invalid for any reason: corrupted/duplicate/orphaned/unrecognized
def get_invalid_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that are invalid for any reason: corrupted/duplicate/orphaned/unrecognized"""
    duplicate = get_duplicate_folders(snapshots, out_dir=out_dir)
    orphaned = get_orphaned_folders(snapshots, out_dir=out_dir)
    corrupted = get_corrupted_folders(snapshots, out_dir=out_dir)
    unrecognized = get_unrecognized_folders(snapshots, out_dir=out_dir)
    return {**duplicate, **orphaned, **corrupted, **unrecognized}
dirs that conflict with other directories that have the same link URL or timestamp
def get_duplicate_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that conflict with other directories that have the same link URL or timestamp"""
    by_url = {}
    by_timestamp = {}
    duplicate_folders = {}

    data_folders = (
        str(entry)
        for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir()
        if entry.is_dir() and not snapshots.filter(timestamp=entry.name).exists()
    )

    for path in chain(snapshots.iterator(), data_folders):
        link = None
        if type(path) is not str:
            path = path.as_link().link_dir

        try:
            link = parse_json_link_details(path)
        except Exception:
            pass

        if link:
            # link folder has same timestamp as different link folder
            by_timestamp[link.timestamp] = by_timestamp.get(link.timestamp, 0) + 1
            if by_timestamp[link.timestamp] > 1:
                duplicate_folders[path] = link

            # link folder has same url as different link folder
            by_url[link.url] = by_url.get(link.url, 0) + 1
            if by_url[link.url] > 1:
                duplicate_folders[path] = link

    return duplicate_folders
dirs that contain a valid index but aren't listed in the main index
def get_orphaned_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that contain a valid index but aren't listed in the main index"""
    orphaned_folders = {}

    for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            link = None
            try:
                link = parse_json_link_details(str(entry))
            except Exception:
                pass

            if link and not snapshots.filter(timestamp=entry.name).exists():
                # folder is a valid link data dir with index details, but it's not in the main index
                orphaned_folders[str(entry)] = link

    return orphaned_folders