Auto-detects which docker context to use. :return: name of the docker context to use
def autodetect_docker_context(): """ Auto-detects which docker context to use. :return: name of the docker context to use """ result = run_command( ["docker", "context", "ls", "--format=json"], capture_output=True, check=False, text=True, ) if result.returncode != 0: get_console().print("[warning]Could not detect docker builder. Using default.[/]") return "default" try: context_dicts = json.loads(result.stdout) if isinstance(context_dicts, dict): context_dicts = [context_dicts] except json.decoder.JSONDecodeError: context_dicts = (json.loads(line) for line in result.stdout.splitlines() if line.strip()) known_contexts = {info["Name"]: info for info in context_dicts} if not known_contexts: get_console().print("[warning]Could not detect docker builder. Using default.[/]") return "default" for preferred_context_name in PREFERRED_CONTEXTS: try: context = known_contexts[preferred_context_name] except KeyError: continue # On Windows, some contexts are used for WSL2. We don't want to use those. if context["DockerEndpoint"] == "npipe:////./pipe/dockerDesktopLinuxEngine": continue get_console().print(f"[info]Using {preferred_context_name!r} as context.[/]") return preferred_context_name fallback_context = next(iter(known_contexts)) get_console().print( f"[warning]Could not use any of the preferred docker contexts {PREFERRED_CONTEXTS}.\n" f"Using {fallback_context} as context.[/]" ) return fallback_context
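Newer Docker CLI versions print one JSON object per line for `docker context ls --format=json`, while older ones print a single JSON document, which is why the code above falls back to line-by-line parsing. A minimal, standalone sketch of that dual parsing (the sample output and endpoints below are illustrative only):

import json

# Illustrative only: newer Docker CLIs emit "JSON lines" - one object per line.
raw_output = (
    '{"Name": "default", "DockerEndpoint": "unix:///var/run/docker.sock"}\n'
    '{"Name": "desktop-linux", "DockerEndpoint": "unix:///home/user/.docker/desktop/docker.sock"}'
)
try:
    context_dicts = json.loads(raw_output)
    if isinstance(context_dicts, dict):
        context_dicts = [context_dicts]
except json.decoder.JSONDecodeError:
    # Fall back to treating every non-empty line as a separate JSON document.
    context_dicts = [json.loads(line) for line in raw_output.splitlines() if line.strip()]
known_contexts = {info["Name"]: info for info in context_dicts}
print(sorted(known_contexts))  # ['default', 'desktop-linux']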
Executes a command in the Breeze shell. When you want to execute a script/bash command inside the CI container and want to use `enter_shell` for this purpose, this helper method sets the following parameters of shell_params: * backend - to force the sqlite backend * clean_sql_db=True - to clean the sqlite DB * executor - to force SequentialExecutor * forward_ports=False - to avoid forwarding ports from the container to the host, which avoids clashes with other commands and an already opened breeze shell * project_name - to avoid name clashes with the default "breeze" project name * quiet=True - to avoid displaying all "interactive" parts of Breeze: ASCIIART, CHEATSHEET, some diagnostics * skip_environment_initialization - to avoid initializing the interactive environment * skip_image_upgrade_check - to avoid checking if the image is up to date * extra_args - if a command is passed as a parameter, used to pass the command to execute in the shell :param shell_params: shell parameters to use :param project_name: name of the project to use. This avoids name clashes with the default "breeze" project name - this way you can run the command in parallel to a regular "breeze" shell :param command: command to execute in the shell (optional)
def execute_command_in_shell( shell_params: ShellParams, project_name: str, command: str | None = None, output: Output | None = None ) -> RunCommandResult: """Executes command in shell. When you want to execute a script/bash command inside the CI container and want to use `enter_shell` for this purpose, the helper methods sets the following parameters of shell_params: * backend - to force sqlite backend * clean_sql_db=True - to clean the sqlite DB * executor - to force SequentialExecutor * forward_ports=False - to avoid forwarding ports from the container to the host - again that will allow to avoid clashes with other commands and opened breeze shell * project_name - to avoid name clashes with default "breeze" project name used * quiet=True - avoid displaying all "interactive" parts of Breeze: ASCIIART, CHEATSHEET, some diagnostics * skip_environment_initialization - to avoid initializing interactive environment * skip_image_upgrade_check - to avoid checking if the image is up to date if command is passed as parameter, extra_args - to pass the command to execute in the shell :param shell_params: shell parameters to use :param project_name: Name of the project to use. This avoids name clashes with default 'breeze" project name used - this way you will be able to run the command in parallel to regular "breeze" shell opened in parallel :param command: """ shell_params.backend = "sqlite" shell_params.executor = SEQUENTIAL_EXECUTOR shell_params.forward_ports = False shell_params.project_name = project_name shell_params.quiet = True shell_params.skip_environment_initialization = True shell_params.skip_image_upgrade_check = True if get_verbose(): get_console().print(f"[warning]Backend forced to: sqlite and {SEQUENTIAL_EXECUTOR}[/]") get_console().print("[warning]Sqlite DB is cleaned[/]") get_console().print(f"[warning]Executor forced to {SEQUENTIAL_EXECUTOR}[/]") get_console().print("[warning]Disabled port forwarding[/]") get_console().print(f"[warning]Project name set to: {project_name}[/]") get_console().print("[warning]Forced quiet mode[/]") get_console().print("[warning]Forced skipping environment initialization[/]") get_console().print("[warning]Forced skipping upgrade check[/]") if command: shell_params.extra_args = (command,) if get_verbose(): get_console().print(f"[info]Command to execute: '{command}'[/]") return enter_shell(shell_params, output=output)
Enters the Breeze shell using the provided shell parameters: * checks if the docker version is good * checks if the docker-compose version is good * displays ASCIIART and CHEATSHEET unless disabled * shuts down the existing compose project when needed * executes the command that drops the user into the Breeze shell
def enter_shell(shell_params: ShellParams, output: Output | None = None) -> RunCommandResult: """ Executes entering shell using the parameters passed as kwargs: * checks if docker version is good * checks if docker-compose version is good * updates kwargs with cached parameters * displays ASCIIART and CHEATSHEET unless disabled * build ShellParams from the updated kwargs * shuts down existing project * executes the command to drop the user to Breeze shell """ perform_environment_checks(quiet=shell_params.quiet) fix_ownership_using_docker(quiet=shell_params.quiet) cleanup_python_generated_files() if read_from_cache_file("suppress_asciiart") is None and not shell_params.quiet: get_console().print(ASCIIART, style=ASCIIART_STYLE) if read_from_cache_file("suppress_cheatsheet") is None and not shell_params.quiet: get_console().print(CHEATSHEET, style=CHEATSHEET_STYLE) if shell_params.use_airflow_version: # in case you use specific version of Airflow, you want to bring airflow down automatically before # using it. This prevents the problem that if you have newer DB, airflow will not know how # to migrate to it and fail with "Can't locate revision identified by 'xxxx'". get_console().print( f"[warning]Bringing the project down as {shell_params.use_airflow_version} " f"airflow version is used[/]" ) bring_compose_project_down(preserve_volumes=False, shell_params=shell_params) if shell_params.backend == "sqlite" and shell_params.executor != SEQUENTIAL_EXECUTOR: get_console().print( f"\n[warning]backend: sqlite is not " f"compatible with executor: {shell_params.executor}. " f"Changing the executor to {SEQUENTIAL_EXECUTOR}.\n" ) shell_params.executor = SEQUENTIAL_EXECUTOR if shell_params.restart: bring_compose_project_down(preserve_volumes=False, shell_params=shell_params) if shell_params.include_mypy_volume: create_mypy_volume_if_needed() shell_params.print_badge_info() cmd = ["docker", "compose"] if shell_params.quiet: cmd.extend(["--progress", "quiet"]) if shell_params.project_name: cmd.extend(["--project-name", shell_params.project_name]) cmd.extend(["run", "--service-ports", "--rm"]) if shell_params.tty == "disabled": cmd.append("--no-TTY") elif shell_params.tty == "enabled": cmd.append("--tty") cmd.append("airflow") cmd_added = shell_params.command_passed if cmd_added is not None: cmd.extend(["-c", cmd_added]) if "arm64" in DOCKER_DEFAULT_PLATFORM: if shell_params.backend == "mysql": get_console().print("\n[warn]MySQL use MariaDB client binaries on ARM architecture.[/]\n") if "openlineage" in shell_params.integration or "all" in shell_params.integration: if shell_params.backend != "postgres" or shell_params.postgres_version not in ["12", "13", "14"]: get_console().print( "\n[error]Only PostgreSQL 12, 13, and 14 are supported " "as a backend with OpenLineage integration via Breeze[/]\n" ) sys.exit(1) command_result = run_command( cmd, text=True, check=False, env=shell_params.env_variables_for_docker_commands, output=output, output_outside_the_group=True, ) if command_result.returncode == 0: return command_result else: get_console().print(f"[red]Error {command_result.returncode} returned[/]") if get_verbose(): get_console().print(command_result.stderr) return command_result
Displays summary of errors
def display_errors_summary(build_errors: dict[str, list[DocBuildError]]) -> None: """Displays summary of errors""" console.print() console.print("[red]" + "#" * 30 + " Start docs build errors summary " + "#" * 30 + "[/]") console.print() for package_name, errors in build_errors.items(): if package_name: console.print("=" * 30 + f" [info]{package_name}[/] " + "=" * 30) else: console.print("=" * 30, " [info]General[/] ", "=" * 30) for warning_no, error in enumerate(sorted(errors), 1): console.print("-" * 30, f"[red]Error {warning_no:3}[/]", "-" * 20) console.print(error.message) console.print() if error.file_path and not error.file_path.endswith("<unknown>") and error.line_no: console.print( f"File path: {os.path.relpath(error.file_path, start=DOCS_DIR)} ({error.line_no})" ) console.print() console.print(prepare_code_snippet(error.file_path, error.line_no)) elif error.file_path: console.print(f"File path: {error.file_path}") console.print() console.print("[red]" + "#" * 30 + " End docs build errors summary " + "#" * 30 + "[/]") console.print()
Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of DocBuildErrors.
def parse_sphinx_warnings(warning_text: str, docs_dir: str) -> list[DocBuildError]: """ Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of DocBuildErrors. """ sphinx_build_errors = [] for sphinx_warning in warning_text.splitlines(): if not sphinx_warning: continue warning_parts = sphinx_warning.split(":", 2) if len(warning_parts) == 3: try: sphinx_build_errors.append( DocBuildError( file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]), message=warning_parts[2], ) ) except Exception: # If an exception occurred while parsing the warning message, display the raw warning message. sphinx_build_errors.append( DocBuildError(file_path=None, line_no=None, message=sphinx_warning) ) else: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) return sphinx_build_errors
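As a hedged illustration of the input format the parser expects, a typical Sphinx warning line splits on the first two colons into file, line number and message (the warning below is hypothetical):

# Hypothetical warning line of the form <relative path>:<line>:<message>.
sphinx_warning = "apache-airflow/howto/operator.rst:42: WARNING: undefined label: my-label"
warning_parts = sphinx_warning.split(":", 2)
# -> ['apache-airflow/howto/operator.rst', '42', ' WARNING: undefined label: my-label']
file_path, line_no, message = warning_parts[0], int(warning_parts[1]), warning_parts[2]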
Include only representative combos from the matrix of the two lists - making sure that each contributing element is present at least once. :param list_1: first list :param list_2: second list :return: list of combinations guaranteed to contain at least one occurrence of every element of both lists
def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]: """ Include only representative combos from the matrix of the two lists - making sure that each of the elements contributing is present at least once. :param list_1: first list :param list_2: second list :return: list of combinations with guaranteed at least one element from each of the list """ all_selected_combinations: list[tuple[str, str]] = [] for i in range(max(len(list_1), len(list_2))): all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)])) return all_selected_combinations
Return the list of element pairs that should be excluded from the matrix of the two lists of items, so that what is left is a representative list of combos (i.e. each item from both lists is present at least once in the combos). :param list_1: first list :param list_2: second list :return: list of exclusions = list_1 x list_2 - representative_combos
def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]: """ Return exclusion lists of elements that should be excluded from the matrix of the two list of items if what's left should be representative list of combos (i.e. each item from both lists, has to be present at least once in the combos). :param list_1: first list :param list_2: second list :return: list of exclusions = list 1 x list 2 - representative_combos """ all_combos: list[tuple[str, str]] = list(itertools.product(list_1, list_2)) return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))]
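A worked example of the two helpers above, assuming they are importable (values chosen arbitrarily):

print(representative_combos(["a", "b", "c"], ["x", "y"]))
# [('a', 'x'), ('b', 'y'), ('c', 'x')]  - every element of both lists appears at least once
print(excluded_combos(["a", "b", "c"], ["x", "y"]))
# [('a', 'y'), ('b', 'x'), ('c', 'y')]  - the remainder of the full 3 x 2 matrix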
Downloads a file from the GitHub repository of Apache Airflow. :param tag: tag to download from :param path: path of the file relative to the repository root :param output_file: path where the file should be downloaded :return: whether the file was successfully downloaded (False if the file is missing or an error occurred)
def download_file_from_github(tag: str, path: str, output_file: Path) -> bool: """ Downloads a file from GitHub repository of Apache Airflow :param tag: tag to download from :param path: path of the file relative to the repository root :param output_file: Path where the file should be downloaded :return: whether the file was successfully downloaded (False if the file is missing or error occurred) """ import requests url = f"https://raw.githubusercontent.com/apache/airflow/{tag}/{path}" get_console().print(f"[info]Downloading {url} to {output_file}") if not get_dry_run(): response = requests.get(url) if response.status_code == 404: get_console().print(f"[warning]The {url} has not been found. Skipping") return False if response.status_code != 200: get_console().print( f"[error]The {url} could not be downloaded. Status code {response.status_code}" ) return False output_file.write_bytes(response.content) get_console().print(f"[success]Downloaded {url} to {output_file}") return True
Gets the list of active Airflow versions from GitHub. :param confirm: if True, will ask the user before proceeding with the versions found :return: tuple of the list of active Airflow versions and a dict of Airflow release dates (in ISO 8601 format)
def get_active_airflow_versions(confirm: bool = True) -> tuple[list[str], dict[str, str]]: """ Gets the list of active Airflow versions from GitHub. :param confirm: if True, will ask the user before proceeding with the versions found :return: tuple: list of active Airflow versions and dict of Airflow release dates (in ISO 8601 format) """ from git import GitCommandError, Repo from packaging.version import Version airflow_release_dates: dict[str, str] = {} get_console().print( "\n[warning]Make sure you have `apache` remote added pointing to apache/airflow repository\n" ) get_console().print("[info]Fetching all released Airflow 2 versions from GitHub[/]\n") repo = Repo(AIRFLOW_SOURCES_ROOT) all_active_tags: list[str] = [] try: ref_tags = repo.git.ls_remote("--tags", "apache").splitlines() except GitCommandError as ex: get_console().print( "[error]Could not fetch tags from `apache` remote! Make sure to have it configured.\n" ) get_console().print(f"{ex}\n") get_console().print( "[info]You can add the apache remote with one of these commands (depending on which protocol you use):\n" " * git remote add apache https://github.com/apache/airflow.git\n" " * git remote add apache [email protected]:apache/airflow.git\n" ) sys.exit(1) tags = [tag.split("refs/tags/")[1].strip() for tag in ref_tags if "refs/tags/" in tag] for tag in tags: match = ACTIVE_TAG_MATCH.match(tag) if match and match.group(1) == "2": all_active_tags.append(tag) airflow_versions = sorted(all_active_tags, key=Version) for version in airflow_versions: date = get_tag_date(version) if not date: get_console().print(f"[error]Error fetching tag date for Airflow {version}") sys.exit(1) airflow_release_dates[version] = date get_console().print("[info]All Airflow 2 versions") for version in airflow_versions: get_console().print(f" {version}: [info]{airflow_release_dates[version]}[/]") if confirm: answer = user_confirm( "Should we continue with those versions?", quit_allowed=False, default_answer=Answer.YES ) if answer == Answer.NO: get_console().print("[red]Aborting[/]") sys.exit(1) return airflow_versions, airflow_release_dates
Downloads constraints file from GitHub repository of Apache Airflow :param airflow_version: airflow version :param python_version: python version :param include_provider_dependencies: whether to include provider dependencies :param output_file: the file where to store the constraint file :return: true if the file was successfully downloaded
def download_constraints_file( airflow_version: str, python_version: str, include_provider_dependencies: bool, output_file: Path ) -> bool: """ Downloads constraints file from GitHub repository of Apache Airflow :param airflow_version: airflow version :param python_version: python version :param include_provider_dependencies: whether to include provider dependencies :param output_file: the file where to store the constraint file :return: true if the file was successfully downloaded """ if include_provider_dependencies: constraints_file_path = f"constraints-{python_version}.txt" else: constraints_file_path = f"constraints-no-providers-{python_version}.txt" constraints_tag = f"constraints-{airflow_version}" return download_file_from_github( tag=constraints_tag, path=constraints_file_path, output_file=output_file, )
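For orientation, with airflow_version='2.7.0', python_version='3.8' and provider dependencies included, the call above composes the well-known constraints URL via download_file_from_github (version numbers chosen for illustration):

constraints_tag = "constraints-2.7.0"
constraints_file_path = "constraints-3.8.txt"
url = f"https://raw.githubusercontent.com/apache/airflow/{constraints_tag}/{constraints_file_path}"
# https://raw.githubusercontent.com/apache/airflow/constraints-2.7.0/constraints-3.8.txt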
Returns the UTC timestamp of the tag in the repo in ISO 8601 format. :param tag: tag to get the date for :return: tag date in ISO 8601 format
def get_tag_date(tag: str) -> str | None: """ Returns UTC timestamp of the tag in the repo in iso time format 8601 :param tag: tag to get date for :return: iso time format 8601 of the tag date """ from git import Repo repo = Repo(AIRFLOW_SOURCES_ROOT) try: tag_object = repo.tags[tag].object except IndexError: get_console().print(f"[warning]Tag {tag} not found in the repository") return None timestamp: int = ( tag_object.committed_date if hasattr(tag_object, "committed_date") else tag_object.tagged_date ) return datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
Get architecture in the form of Tuple: standardized architecture, original platform
def get_host_architecture() -> tuple[Architecture | None, str]: """Get architecture in the form of Tuple: standardized architecture, original platform""" machine = platform.machine() return _MACHINE_TO_ARCHITECTURE.get(machine.lower()), machine
Run image pull in parallel
def run_pull_in_parallel( parallelism: int, skip_cleanup: bool, debug_resources: bool, image_params_list: list[BuildCiParams] | list[BuildProdParams], python_version_list: list[str], verify: bool, include_success_outputs: bool, tag_as_latest: bool, wait_for_image: bool, extra_pytest_args: tuple, ): """Run image pull in parallel""" all_params = [f"Image {image_params.python}" for image_params in image_params_list] with ci_group(f"Pull{'/verify' if verify else ''} for {python_version_list}"): with run_with_pool( parallelism=parallelism, all_params=all_params, debug_resources=debug_resources, progress_matcher=GenericRegexpProgressMatcher(DOCKER_PULL_PROGRESS_REGEXP, lines_to_search=15), ) as (pool, outputs): def get_right_method() -> Callable[..., tuple[int, str]]: if verify: return run_pull_and_verify_image else: return run_pull_image def get_kwds(index: int, image_param: BuildCiParams | BuildProdParams): d = { "image_params": image_param, "wait_for_image": wait_for_image, "tag_as_latest": tag_as_latest, "poll_time_seconds": 10.0, "output": outputs[index], } if verify: d["extra_pytest_args"] = extra_pytest_args return d results = [ pool.apply_async(get_right_method(), kwds=get_kwds(index, image_param)) for index, image_param in enumerate(image_params_list) ] check_async_run_results( results=results, success="All images pulled", outputs=outputs, include_success_outputs=include_success_outputs, skip_cleanup=skip_cleanup, )
Pull image specified. :param image_params: Image parameters. :param output: output to write to :param wait_for_image: whether we should wait for the image to be available :param tag_as_latest: tag the image as latest :param poll_time_seconds: what's the polling time between checks if images are there (default 10 s) :param max_time_minutes: what's the maximum time to wait for the image to be pulled (default 70 minutes) :return: Tuple of return code and description of the image pulled
def run_pull_image( image_params: CommonBuildParams, wait_for_image: bool, tag_as_latest: bool, output: Output | None, poll_time_seconds: float = 10.0, max_time_minutes: float = 70, ) -> tuple[int, str]: """ Pull image specified. :param image_params: Image parameters. :param output: output to write to :param wait_for_image: whether we should wait for the image to be available :param tag_as_latest: tag the image as latest :param poll_time_seconds: what's the polling time between checks if images are there (default 10 s) :param max_time_minutes: what's the maximum time to wait for the image to be pulled (default 70 minutes) :return: Tuple of return code and description of the image pulled """ get_console(output=output).print( f"\n[info]Pulling {image_params.image_type} image of airflow python version: " f"{image_params.python} image: {image_params.airflow_image_name_with_tag} " f"with wait for image: {wait_for_image} and max time to poll {max_time_minutes} minutes[/]\n" ) current_loop = 1 start_time = time.time() while True: command_to_run = ["docker", "pull", image_params.airflow_image_name_with_tag] command_result = run_command(command_to_run, check=False, output=output) if command_result.returncode == 0: command_result = run_command( ["docker", "inspect", image_params.airflow_image_name_with_tag, "-f", "{{.Size}}"], capture_output=True, output=output, text=True, check=False, ) if not get_dry_run(): if command_result.returncode == 0: image_size = int(command_result.stdout.strip()) if image_size == 0: get_console(output=output).print( "\n[error]The image size was 0 - image creation failed.[/]\n" ) return 1, f"Image Python {image_params.python}" else: get_console(output=output).print( "\n[error]There was an error pulling the size of the image. Failing.[/]\n" ) return ( command_result.returncode, f"Image Python {image_params.python}", ) if tag_as_latest: command_result = tag_image_as_latest(image_params=image_params, output=output) if command_result.returncode == 0 and isinstance(image_params, BuildCiParams): mark_image_as_refreshed(image_params) return command_result.returncode, f"Image Python {image_params.python}" if wait_for_image: if get_verbose() or get_dry_run(): get_console(output=output).print( f"\n[info]Waiting: #{current_loop} {image_params.airflow_image_name_with_tag}.[/]\n" ) time.sleep(poll_time_seconds) current_loop += 1 current_time = time.time() if (current_time - start_time) / 60 > max_time_minutes: get_console(output=output).print( f"\n[error]The image {image_params.airflow_image_name_with_tag} " f"did not appear in {max_time_minutes} minutes. Failing.[/]\n" ) return 1, f"Image Python {image_params.python}" continue else: get_console(output=output).print( f"\n[error]There was an error pulling the image {image_params.python}. Failing.[/]\n" ) return command_result.returncode, f"Image Python {image_params.python}"
Creates the kind cluster config file, with randomly selected free ports substituted for the forwarded webserver port and the API server port.
def set_random_cluster_ports(python: str, kubernetes_version: str, output: Output | None) -> None: """ Creates cluster config file and returns sockets keeping the ports bound. The sockets should be closed just before creating the cluster. """ forwarded_port_number = _get_free_port() api_server_port = _get_free_port() get_console(output=output).print( f"[info]Random ports: API: {api_server_port}, Web: {forwarded_port_number}" ) cluster_conf_path = get_kind_cluster_config_path(python=python, kubernetes_version=kubernetes_version) config = ( (AIRFLOW_SOURCES_ROOT / "scripts" / "ci" / "kubernetes" / "kind-cluster-conf.yaml") .read_text() .replace("{{FORWARDED_PORT_NUMBER}}", str(forwarded_port_number)) .replace("{{API_SERVER_PORT}}", str(api_server_port)) ) cluster_conf_path.write_text(config) get_console(output=output).print(f"[info]Config created in {cluster_conf_path}:\n") get_console(output=output).print(config) get_console(output=output).print("\n")
Check if the hash of the file stored in the cache differs from the given hash, i.e. whether the file content has been modified. Optionally updates the stored hash. :param file_hash: hash of the current version of the file :param cache_path: path where the hash is stored :param update: whether to update the stored hash if it is found to be different :return: True if the hash file was missing or the hash has changed.
def check_md5checksum_in_cache_modified(file_hash: str, cache_path: Path, update: bool) -> bool: """ Check if the file hash is present in cache and its content has been modified. Optionally updates the hash. :param file_hash: hash of the current version of the file :param cache_path: path where the hash is stored :param update: whether to update hash if it is found different :return: True if the hash file was missing or hash has changed. """ if cache_path.exists(): old_md5_checksum_content = Path(cache_path).read_text() if old_md5_checksum_content.strip() != file_hash.strip(): if update: save_md5_file(cache_path, file_hash) return True else: if update: save_md5_file(cache_path, file_hash) return True return False
Generates md5 hash for the file.
def generate_md5(filename, file_size: int = 65536): """Generates md5 hash for the file.""" hash_md5 = hashlib.md5() with open(filename, "rb") as f: for file_chunk in iter(lambda: f.read(file_size), b""): hash_md5.update(file_chunk) return hash_md5.hexdigest()
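A minimal usage sketch combining the two helpers above, assuming they are importable (the cache location and file name are illustrative):

from pathlib import Path

md5sum_cache_dir = Path("/tmp/breeze-md5-cache")  # illustrative cache location
md5sum_cache_dir.mkdir(parents=True, exist_ok=True)
file_to_check = Path("pyproject.toml")
current_hash = generate_md5(file_to_check)
cache_path = md5sum_cache_dir / f"{file_to_check.name}.md5sum"
# Returns True when the cached hash is missing or differs; update=True refreshes the cache.
if check_md5checksum_in_cache_modified(current_hash, cache_path, update=True):
    print(f"{file_to_check} changed since the last check")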
Calculates checksums for all interesting files and stores the hashes in the md5sum_cache_dir. Optionally modifies the hashes. :param md5sum_cache_dir: directory where to store cached information :param update: whether to update the hashes :param skip_provider_dependencies_check: whether to skip regeneration of the provider dependencies :return: Tuple of two lists: modified and not-modified files
def calculate_md5_checksum_for_files( md5sum_cache_dir: Path, update: bool = False, skip_provider_dependencies_check: bool = False ) -> tuple[list[str], list[str]]: """ Calculates checksums for all interesting files and stores the hashes in the md5sum_cache_dir. Optionally modifies the hashes. :param md5sum_cache_dir: directory where to store cached information :param update: whether to update the hashes :param skip_provider_dependencies_check: whether to skip regeneration of the provider dependencies :return: Tuple of two lists: modified and not-modified files """ not_modified_files = [] modified_files = [] if not skip_provider_dependencies_check: modified_provider_yaml_files = [] for file in ALL_PROVIDER_YAML_FILES: # Only check provider yaml files once and save the result immediately. # If we need to regenerate the dependencies and they are not modified then # all is fine and we can save checksums for the new files if check_md5_sum_for_file(file, md5sum_cache_dir, True): modified_provider_yaml_files.append(file) if modified_provider_yaml_files: get_console().print( "[info]Attempting to generate provider dependencies. " f"{len(modified_provider_yaml_files)} provider.yaml file(s) changed since last check." ) if get_verbose(): get_console().print( [ os.fspath(file.relative_to(AIRFLOW_SOURCES_ROOT)) for file in modified_provider_yaml_files ] ) # Regenerate provider_dependencies.json result = run_command( [ sys.executable, os.fspath( AIRFLOW_SOURCES_ROOT / "scripts" / "ci" / "pre_commit" / "update_providers_dependencies.py" ), ], cwd=AIRFLOW_SOURCES_ROOT, check=False, ) if result.returncode != 0: sys.exit(result.returncode) for file in FILES_FOR_REBUILD_CHECK: is_modified = check_md5_sum_for_file(file, md5sum_cache_dir, update) if is_modified: modified_files.append(file) else: not_modified_files.append(file) return modified_files, not_modified_files
Checks if build is needed based on whether important files were modified. :param build_ci_params: parameters for the build :param md5sum_cache_dir: directory where cached md5 sums are stored :param skip_provider_dependencies_check: whether to skip regeneration of the provider dependencies :return: True if build is needed.
def md5sum_check_if_build_is_needed( build_ci_params: BuildCiParams, md5sum_cache_dir: Path, skip_provider_dependencies_check: bool ) -> bool: """ Checks if build is needed based on whether important files were modified. :param build_ci_params: parameters for the build :param md5sum_cache_dir: directory where cached md5 sums are stored :param skip_provider_dependencies_check: whether to skip regeneration of the provider dependencies :return: True if build is needed. """ modified_files, not_modified_files = calculate_md5_checksum_for_files( md5sum_cache_dir, update=False, skip_provider_dependencies_check=skip_provider_dependencies_check ) if modified_files: if build_ci_params.skip_image_upgrade_check: if build_ci_params.warn_image_upgrade_needed: get_console().print( "\n[warning]You are skipping the image upgrade check, but the image needs an upgrade. " "This might lead to out-dated results of the check![/]" ) get_console().print( f"[info]Consider running `breeze ci-image build --python {build_ci_params.python} " f"at earliest convenience![/]\n" ) return False get_console().print( f"[warning]The following important files are modified in {AIRFLOW_SOURCES_ROOT} " f"since last time image was built: [/]\n\n" ) for file in modified_files: get_console().print(f" * [info]{file}[/]") get_console().print("\n[warning]Likely CI image needs rebuild[/]\n") return True else: if build_ci_params.skip_image_upgrade_check: return False get_console().print( "[info]Docker image build is not needed for CI build as no important files are changed! " "You can add --force-build to force it[/]" ) return False
Load all data from provider.yaml files. :return: A dictionary containing the contents of all provider.yaml files.
def get_provider_packages_metadata() -> dict[str, dict[str, Any]]: """ Load all data from provider.yaml files :return: A dictionary containing the contents of all provider.yaml files. """ if PROVIDER_METADATA: return PROVIDER_METADATA for provider_yaml_path in get_provider_yaml_paths(): refresh_provider_metadata_from_yaml_file(provider_yaml_path) return PROVIDER_METADATA
Validates provider info against the runtime schema. This way we check if the provider info in the packages is future-compatible. The Runtime Schema should only change when there is a major version change. :param provider_info: provider info to validate
def validate_provider_info_with_runtime_schema(provider_info: dict[str, Any]) -> None: """Validates provider info against the runtime schema. This way we check if the provider info in the packages is future-compatible. The Runtime Schema should only change when there is a major version change. :param provider_info: provider info to validate """ import jsonschema schema = json.loads(PROVIDER_RUNTIME_DATA_SCHEMA_PATH.read_text()) try: jsonschema.validate(provider_info, schema=schema) except jsonschema.ValidationError as ex: get_console().print( "[red]Error when validating schema. The schema must be compatible with " "[bold]'airflow/provider_info.schema.json'[/bold].\n" f"Original exception [bold]{type(ex).__name__}: {ex}[/]" ) raise SystemExit(1)
Retrieves provider info from the provider yaml file. :param provider_id: package id to retrieve provider.yaml from :return: provider_info dictionary
def get_provider_info_dict(provider_id: str) -> dict[str, Any]: """Retrieves provider info from the provider yaml file. :param provider_id: package id to retrieve provider.yaml from :return: provider_info dictionary """ provider_yaml_dict = get_provider_packages_metadata().get(provider_id) if provider_yaml_dict: validate_provider_info_with_runtime_schema(provider_yaml_dict) return provider_yaml_dict or {}
Return provider ids for all packages that are currently available (not suspended). :param include_suspended: whether the suspended packages should be included :param include_removed: whether the removed packages should be included :param include_not_ready: whether the not-ready packages should be included :param include_regular: whether the regular packages should be included :param include_non_provider_doc_packages: whether the non-provider doc packages should be included (packages like apache-airflow, helm-chart, docker-stack) :param include_all_providers: whether "all-providers" should be included in the list. :return: sorted list of available package ids
def get_available_packages( include_non_provider_doc_packages: bool = False, include_all_providers: bool = False, include_suspended: bool = False, include_removed: bool = False, include_not_ready: bool = False, include_regular: bool = True, ) -> list[str]: """ Return provider ids for all packages that are available currently (not suspended). :rtype: object :param include_suspended: whether the suspended packages should be included :param include_removed: whether the removed packages should be included :param include_not_ready: whether the not-ready packages should be included :param include_regular: whether the regular packages should be included :param include_non_provider_doc_packages: whether the non-provider doc packages should be included (packages like apache-airflow, helm-chart, docker-stack) :param include_all_providers: whether "all-providers" should be included ni the list. """ provider_dependencies = json.loads(PROVIDER_DEPENDENCIES_JSON_FILE_PATH.read_text()) valid_states = set() if include_not_ready: valid_states.add("not-ready") if include_regular: valid_states.update({"ready", "pre-release"}) if include_suspended: valid_states.add("suspended") if include_removed: valid_states.add("removed") available_packages: list[str] = [ provider_id for provider_id, provider_dependencies in provider_dependencies.items() if provider_dependencies["state"] in valid_states ] if include_non_provider_doc_packages: available_packages.extend(REGULAR_DOC_PACKAGES) if include_all_providers: available_packages.append("all-providers") return sorted(set(available_packages))
In case there are "all-providers" in the list, expand the list with all providers.
def expand_all_provider_packages( short_doc_packages: tuple[str, ...], include_removed: bool = False, include_not_ready: bool = False, ) -> tuple[str, ...]: """In case there are "all-providers" in the list, expand the list with all providers.""" if "all-providers" in short_doc_packages: packages = [package for package in short_doc_packages if package != "all-providers"] packages.extend( get_available_packages(include_removed=include_removed, include_not_ready=include_not_ready) ) short_doc_packages = tuple(set(packages)) return short_doc_packages
Finds matching long package names based on the short package names and the package filters specified. The sequence of specified packages / filters is kept (filters first, packages next). In case there are filters that do not match any of the packages, an error is raised. :param short_packages: short forms of package names :param filters: package filters specified
def find_matching_long_package_names( short_packages: tuple[str, ...], filters: tuple[str, ...] | None = None, ) -> tuple[str, ...]: """Finds matching long package names based on short package name and package filters specified. The sequence of specified packages / filters is kept (filters first, packages next). In case there are filters that do not match any of the packages error is raised. :param short_packages: short forms of package names :param filters: package filters specified """ available_doc_packages = list( get_long_package_names(get_available_packages(include_non_provider_doc_packages=True)) ) if not filters and not short_packages: available_doc_packages.extend(filters or ()) return tuple(set(available_doc_packages)) processed_package_filters = list(filters or ()) processed_package_filters.extend(get_long_package_names(short_packages)) removed_packages: list[str] = [ f"apache-airflow-providers-{provider.replace('.','-')}" for provider in get_removed_provider_ids() ] all_packages_including_removed: list[str] = available_doc_packages + removed_packages invalid_filters = [ f for f in processed_package_filters if not any(fnmatch.fnmatch(p, f) for p in all_packages_including_removed) ] if invalid_filters: raise SystemExit( f"Some filters did not find any package: {invalid_filters}, Please check if they are correct." ) return tuple( [ p for p in all_packages_including_removed if any(fnmatch.fnmatch(p, f) for f in processed_package_filters) ] )
Returns PIP package name for the package id. :param provider_id: id of the package :return: the name of pip package
def get_pip_package_name(provider_id: str) -> str: """ Returns PIP package name for the package id. :param provider_id: id of the package :return: the name of pip package """ return "apache-airflow-providers-" + provider_id.replace(".", "-")
Returns Wheel package name prefix for the package id. :param provider_id: id of the package :return: the name of wheel package prefix
def get_wheel_package_name(provider_id: str) -> str: """ Returns Wheel package name prefix for the package id. :param provider_id: id of the package :return: the name of wheel package prefix """ return "apache_airflow_providers_" + provider_id.replace(".", "_")
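Both helpers are plain string transforms; for example (provider id chosen for illustration):

print(get_pip_package_name("cncf.kubernetes"))    # apache-airflow-providers-cncf-kubernetes
print(get_wheel_package_name("cncf.kubernetes"))  # apache_airflow_providers_cncf_kubernetes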
Returns install requirements for the package. :param provider_id: id of the provider package :param version_suffix: optional version suffix for packages :return: install requirements of the package
def get_install_requirements(provider_id: str, version_suffix: str) -> str: """ Returns install requirements for the package. :param provider_id: id of the provider package :param version_suffix: optional version suffix for packages :return: install requirements of the package """ if provider_id in get_removed_provider_ids(): dependencies = get_provider_requirements(provider_id) else: dependencies = PROVIDER_DEPENDENCIES.get(provider_id)["deps"] install_requires = [apply_version_suffix(clause, version_suffix) for clause in dependencies] return "".join(f'\n "{ir}",' for ir in install_requires)
Finds extras for the package specified. :param provider_id: id of the package :param version_suffix: optional version suffix applied to the extra dependencies
def get_package_extras(provider_id: str, version_suffix: str) -> dict[str, list[str]]: """ Finds extras for the package specified. :param provider_id: id of the package """ if provider_id == "providers": return {} if provider_id in get_removed_provider_ids(): return {} extras_dict: dict[str, list[str]] = { module: [get_pip_package_name(module)] for module in PROVIDER_DEPENDENCIES.get(provider_id)["cross-providers-deps"] } provider_yaml_dict = get_provider_packages_metadata().get(provider_id) additional_extras = provider_yaml_dict.get("additional-extras") if provider_yaml_dict else None if additional_extras: for entry in additional_extras: name = entry["name"] dependencies = entry["dependencies"] if name in extras_dict: # remove non-versioned dependencies if versioned ones are coming existing_dependencies = set(extras_dict[name]) for new_dependency in dependencies: for dependency in existing_dependencies: # remove extra if exists as non-versioned one if new_dependency.startswith(dependency): extras_dict[name].remove(dependency) break extras_dict[name].append(new_dependency) else: extras_dict[name] = dependencies for extra, dependencies in extras_dict.items(): extras_dict[extra] = [apply_version_suffix(clause, version_suffix) for clause in dependencies] return extras_dict
Converts cross-package dependencies to a Markdown table :param cross_package_dependencies: list of cross-package dependencies :param markdown: if True, Markdown format is used else rst :return: formatted table
def convert_cross_package_dependencies_to_table( cross_package_dependencies: list[str], markdown: bool = True, ) -> str: """ Converts cross-package dependencies to a Markdown table :param cross_package_dependencies: list of cross-package dependencies :param markdown: if True, Markdown format is used else rst :return: formatted table """ from tabulate import tabulate headers = ["Dependent package", "Extra"] table_data = [] prefix = "apache-airflow-providers-" base_url = "https://airflow.apache.org/docs/" for dependency in cross_package_dependencies: pip_package_name = f"{prefix}{dependency.replace('.','-')}" url_suffix = f"{dependency.replace('.','-')}" if markdown: url = f"[{pip_package_name}]({base_url}{url_suffix})" else: url = f"`{pip_package_name} <{base_url}{prefix}{url_suffix}>`_" table_data.append((url, f"`{dependency}`" if markdown else f"``{dependency}``")) return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir. :param template_name: name of the template to use :param context: Jinja2 context :param extension: Target file extension :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template
def render_template( template_name: str, context: dict[str, Any], extension: str, autoescape: bool = True, keep_trailing_newline: bool = False, ) -> str: """ Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir. :param template_name: name of the template to use :param context: Jinja2 context :param extension: Target file extension :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template """ import jinja2 template_loader = jinja2.FileSystemLoader( searchpath=BREEZE_SOURCES_ROOT / "src" / "airflow_breeze" / "templates" ) template_env = jinja2.Environment( loader=template_loader, undefined=jinja2.StrictUndefined, autoescape=autoescape, keep_trailing_newline=keep_trailing_newline, ) template = template_env.get_template(f"{template_name}_TEMPLATE{extension}.jinja2") content: str = template.render(context) return content
Make sure that the apache remote exists in git. We need to take a log from the apache repository main branch - not locally, because we might not have the latest version. Also, the local repo might be shallow, so we need to un-shallow it to see all the history. This will: * check if the remote exists and add it if it does not * check if the local repo is shallow and un-shallow it in that case * fetch from the remote, including all tags and overriding local tags in case they are set differently
def make_sure_remote_apache_exists_and_fetch(github_repository: str = "apache/airflow"): """Make sure that apache remote exist in git. We need to take a log from the apache repository main branch - not locally because we might not have the latest version. Also, the local repo might be shallow, so we need to un-shallow it to see all the history. This will: * check if the remote exists and add if it does not * check if the local repo is shallow, mark it to un-shallow in this case * fetch from the remote including all tags and overriding local tags in case they are set differently """ try: run_command(["git", "remote", "get-url", HTTPS_REMOTE], text=True, capture_output=True) except subprocess.CalledProcessError as ex: if ex.returncode == 128 or ex.returncode == 2: run_command( [ "git", "remote", "add", HTTPS_REMOTE, f"https://github.com/{github_repository}.git", ], check=True, ) else: get_console().print( f"[error]Error {ex}[/]\n[error]When checking if {HTTPS_REMOTE} is set.[/]\n\n" ) sys.exit(1) get_console().print("[info]Fetching full history and tags from remote.") get_console().print("[info]This might override your local tags!") result = run_command( ["git", "rev-parse", "--is-shallow-repository"], check=True, capture_output=True, text=True, ) is_shallow_repo = result.stdout.strip() == "true" fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE] if is_shallow_repo: fetch_command.append("--unshallow") try: run_command(fetch_command) except subprocess.CalledProcessError as e: get_console().print( f"[error]Error {e}[/]\n" f"[error]When fetching tags from remote. Your tags might not be refreshed.[/]\n\n" f'[warning]Please refresh the tags manually via:[/]\n\n"' f'{" ".join(fetch_command)}\n\n' ) sys.exit(1)
Converts PIP requirement list to a Markdown table. :param requirements: requirements list :param markdown: if True, Markdown format is used else rst :return: formatted table
def convert_pip_requirements_to_table(requirements: Iterable[str], markdown: bool = True) -> str: """ Converts PIP requirement list to a Markdown table. :param requirements: requirements list :param markdown: if True, Markdown format is used else rst :return: formatted table """ from tabulate import tabulate headers = ["PIP package", "Version required"] table_data = [] for dependency in requirements: req = PipRequirements.from_requirement(dependency) formatted_package = f"`{req.package}`" if markdown else f"``{req.package}``" formatted_version = "" if req.version_required: formatted_version = f"`{req.version_required}`" if markdown else f"``{req.version_required}``" table_data.append((formatted_package, formatted_version)) return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
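A hedged sketch of the resulting table, assuming PipRequirements.from_requirement splits a specifier into its package name and version constraint (example requirements only; exact column widths come from tabulate):

table = convert_pip_requirements_to_table(["apache-airflow>=2.6.0", "requests>=2.27.0,<3"], markdown=True)
# Roughly:
# | PIP package      | Version required |
# |:-----------------|:-----------------|
# | `apache-airflow` | `>=2.6.0`        |
# | `requests`       | `>=2.27.0,<3`    |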
Return true if the tag exists in the provider repository.
def tag_exists_for_provider(provider_id: str, current_tag: str) -> bool: """Return true if the tag exists in the provider repository.""" provider_details = get_provider_details(provider_id) result = run_command( ["git", "rev-parse", current_tag], cwd=provider_details.source_provider_package_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False, ) return result.returncode == 0
Returns latest tag for the provider.
def get_latest_provider_tag(provider_id: str, suffix: str) -> str: """Returns latest tag for the provider.""" provider_details = get_provider_details(provider_id) current_version = provider_details.versions[0] return get_version_tag(current_version, provider_id, suffix)
Get the last lines of a file efficiently, without reading the whole file (with some limitations). The assumption is that the line length is not bigger than ~180 chars. :param file_name: name of the file :param num_lines: number of lines to return (max) :return: tuple - last lines of the file in two variants: original and with ANSI colours removed
def get_last_lines_of_file(file_name: str, num_lines: int = 2) -> tuple[list[str], list[str]]: """ Get last lines of a file efficiently, without reading the whole file (with some limitations). Assumptions ara that line length not bigger than ~180 chars. :param file_name: name of the file :param num_lines: number of lines to return (max) :return: Tuple - last lines of the file in two variants: original and with removed ansi colours """ # account for EOL max_read = (180 + 2) * num_lines try: seek_size = min(os.stat(file_name).st_size, max_read) except FileNotFoundError: return [], [] with open(file_name, "rb") as temp_f: temp_f.seek(-seek_size, os.SEEK_END) tail = temp_f.read().decode(errors="ignore") last_lines = tail.splitlines()[-num_lines:] last_lines_no_colors = [remove_ansi_colours(line) for line in last_lines] return last_lines, last_lines_no_colors
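A short usage sketch, e.g. for tailing the output file of a parallel run, assuming the helper is importable (the path is illustrative):

last_lines, last_lines_no_colors = get_last_lines_of_file("/tmp/parallel_run_output.log", num_lines=5)
for line in last_lines_no_colors:
    print(line)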
Print summary of completed async results. :param completed_list: list of completed async results.
def print_async_summary(completed_list: list[ApplyResult]) -> None: """ Print summary of completed async results. :param completed_list: list of completed async results. """ completed_list.sort(key=lambda x: x.get()[1]) get_console().print() for result in completed_list: return_code, info = result.get() info = info.replace("[", "\\[") if return_code != 0: get_console().print(f"[error]NOK[/] for {info}: Return code: {return_code}.") else: get_console().print(f"[success]OK [/] for {info}.") get_console().print()
Return completed results from the list.
def get_completed_result_list(results: list[ApplyResult]) -> list[ApplyResult]: """Return completed results from the list.""" return [result for result in results if result.ready()]
Check if all async results were successful. Exits with an error if not. :param results: results of parallel runs (expected in the form of a tuple: (return_code, info)) :param outputs: outputs where results are written to :param success: success string printed when everything is OK :param include_success_outputs: include outputs of successful parallel runs :param poll_time_seconds: poll time between checks :param skip_cleanup: whether to skip cleanup of temporary files :param summarize_on_ci: determines when to summarize the parallel jobs when they are completed in CI, outside the folded CI output :param summary_start_regexp: the regexp that determines the line after which outputs should be printed as summary, so that you do not have to look at the folded details of the run in CI
def check_async_run_results( results: list[ApplyResult], success: str, outputs: list[Output], include_success_outputs: bool, poll_time_seconds: float = 0.2, skip_cleanup: bool = False, summarize_on_ci: SummarizeAfter = SummarizeAfter.NO_SUMMARY, summary_start_regexp: str | None = None, ): """ Check if all async results were success. Exits with error if not. :param results: results of parallel runs (expected in the form of Tuple: (return_code, info) :param outputs: outputs where results are written to :param success: Success string printed when everything is OK :param include_success_outputs: include outputs of successful parallel runs :param poll_time_seconds: what's the poll time between checks :param skip_cleanup: whether to skip cleanup of temporary files. :param summarize_on_ci: determines when to summarize the parallel jobs when they are completed in CI, outside the folded CI output :param summary_start_regexp: the regexp that determines line after which outputs should be printed as summary, so that you do not have to look at the folded details of the run in CI """ from airflow_breeze.utils.ci_group import ci_group completed_number = 0 total_number_of_results = len(results) completed_list = get_completed_result_list(results) while not len(completed_list) == total_number_of_results: current_completed_number = len(completed_list) if current_completed_number != completed_number: completed_number = current_completed_number get_console().print( f"\n[info]Completed {completed_number} out of {total_number_of_results} " f"({completed_number / total_number_of_results:.0%}).[/]\n" ) print_async_summary(completed_list) time.sleep(poll_time_seconds) completed_list = get_completed_result_list(results) completed_number = len(completed_list) get_console().print( f"\n[info]Completed {completed_number} out of {total_number_of_results} " f"({completed_number / total_number_of_results:.0%}).[/]\n" ) print_async_summary(completed_list) errors = False for i, result in enumerate(results): if result.get()[0] != 0: errors = True message_type = MessageType.ERROR else: message_type = MessageType.SUCCESS if message_type == MessageType.ERROR or include_success_outputs: with ci_group(title=f"{outputs[i].escaped_title}", message_type=message_type): os.write(1, Path(outputs[i].file_name).read_bytes()) else: get_console().print(f"[success]{outputs[i].escaped_title} OK[/]") if summarize_on_ci != SummarizeAfter.NO_SUMMARY: regex = re.compile(summary_start_regexp) if summary_start_regexp is not None else None for i, result in enumerate(results): failure = result.get()[0] != 0 if summarize_on_ci in [ SummarizeAfter.BOTH, SummarizeAfter.FAILURE if failure else SummarizeAfter.SUCCESS, ]: print_lines = False for line in Path(outputs[i].file_name).read_bytes().decode(errors="ignore").splitlines(): if not print_lines and (regex is None or regex.match(remove_ansi_colours(line))): print_lines = True get_console().print(f"\n[info]Summary: {outputs[i].escaped_title:<30}:\n") if print_lines: print(line) try: if errors: get_console().print("\n[error]There were errors when running some tasks. Quitting.[/]\n") from airflow_breeze.utils.docker_command_utils import fix_ownership_using_docker fix_ownership_using_docker() sys.exit(1) else: get_console().print(f"\n[success]{success}[/]\n") from airflow_breeze.utils.docker_command_utils import fix_ownership_using_docker fix_ownership_using_docker() finally: if not skip_cleanup: for output in outputs: Path(output.file_name).unlink(missing_ok=True)
Retrieves the hash of setup files from the source of installation of Breeze. This is used to determine if we need to upgrade Breeze because some setup files changed. The Blake2b algorithm is used because it will not be flagged by security checkers as insecure (in Python 3.9 and above we could pass `usedforsecurity=False` to disable such checks, but for now it is better to use a more secure algorithm).
def get_package_setup_metadata_hash() -> str: """ Retrieves hash of setup files from the source of installation of Breeze. This is used in order to determine if we need to upgrade Breeze, because some setup files changed. Blake2b algorithm will not be flagged by security checkers as insecure algorithm (in Python 3.9 and above we can use `usedforsecurity=False` to disable it, but for now it's better to use more secure algorithms. """ # local imported to make sure that autocomplete works try: from importlib.metadata import distribution # type: ignore[attr-defined] except ImportError: from importlib_metadata import distribution # type: ignore[no-redef, assignment] prefix = "Package config hash: " for line in distribution("apache-airflow-breeze").metadata.as_string().splitlines(keepends=False): if line.startswith(prefix): return line[len(prefix) :] return "NOT FOUND"
Retrieves the hash of pyproject.toml from the source of installation of Breeze. This is used to determine if we need to upgrade Breeze because some setup files changed. The Blake2b algorithm is used because it will not be flagged by security checkers as insecure (in Python 3.9 and above we could pass `usedforsecurity=False` to disable such checks, but for now it is better to use a more secure algorithm).
def get_installation_sources_config_metadata_hash() -> str: """ Retrieves hash of pyproject.toml from the source of installation of Breeze. This is used in order to determine if we need to upgrade Breeze, because some setup files changed. Blake2b algorithm will not be flagged by security checkers as insecure algorithm (in Python 3.9 and above we can use `usedforsecurity=False` to disable it, but for now it's better to use more secure algorithms. """ installation_sources = get_installation_airflow_sources() if installation_sources is None: return "NOT FOUND" return get_pyproject_toml_hash(installation_sources)
Retrieves hash of setup files from the currently used sources.
def get_used_sources_setup_metadata_hash() -> str: """ Retrieves hash of setup files from the currently used sources. """ return get_pyproject_toml_hash(get_used_airflow_sources())
When we run the upgrade check, --answer is not parsed yet, so we need to guess it.
def set_forced_answer_for_upgrade_check(): """When we run upgrade check --answer is not parsed yet, so we need to guess it.""" if "--answer n" in " ".join(sys.argv).lower() or os.environ.get("ANSWER", "").lower().startswith("n"): set_forced_answer("no") if "--answer y" in " ".join(sys.argv).lower() or os.environ.get("ANSWER", "").lower().startswith("y"): set_forced_answer("yes") if "--answer q" in " ".join(sys.argv).lower() or os.environ.get("ANSWER", "").lower().startswith("q"): set_forced_answer("quit")
Reinstalls Breeze if the setup files have changed since Breeze was installed. :return: True if Breeze was reinstalled.
def reinstall_if_setup_changed() -> bool: """ Prints warning if detected airflow sources are not the ones that Breeze was installed with. :return: True if warning was printed. """ try: package_hash = get_package_setup_metadata_hash() except ModuleNotFoundError as e: if "importlib_metadata" in e.msg: return False if "apache-airflow-breeze" in e.msg: print( """Missing Package `apache-airflow-breeze`. Use `pipx install -e ./dev/breeze` to install the package.""" ) return False sources_hash = get_installation_sources_config_metadata_hash() if sources_hash != package_hash: installation_sources = get_installation_airflow_sources() if installation_sources is not None: breeze_sources = installation_sources / "dev" / "breeze" warn_dependencies_changed() process_breeze_readme(breeze_sources, sources_hash) set_forced_answer_for_upgrade_check() reinstall_breeze(breeze_sources) set_forced_answer(None) return True return False
Reinstalls Breeze if the detected airflow sources are not the ones that Breeze was installed with. :param airflow_sources: source of the airflow code that we are operating on :return: True if Breeze was reinstalled.
def reinstall_if_different_sources(airflow_sources: Path) -> bool: """ Prints warning if detected airflow sources are not the ones that Breeze was installed with. :param airflow_sources: source for airflow code that we are operating on :return: True if warning was printed. """ installation_airflow_sources = get_installation_airflow_sources() if installation_airflow_sources and airflow_sources != installation_airflow_sources: reinstall_breeze(airflow_sources / "dev" / "breeze") return True return False
Retrieves the Root of the Airflow Sources where Breeze was installed from. :return: the Path for Airflow sources.
def get_installation_airflow_sources() -> Path | None: """ Retrieves the Root of the Airflow Sources where Breeze was installed from. :return: the Path for Airflow sources. """ return search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent)
Retrieves the Root of used Airflow Sources which we operate on. Those are either Airflow sources found upwards in directory tree or sources where Breeze was installed from. :return: the Path for Airflow sources we use.
def get_used_airflow_sources() -> Path: """ Retrieves the Root of used Airflow Sources which we operate on. Those are either Airflow sources found upwards in directory tree or sources where Breeze was installed from. :return: the Path for Airflow sources we use. """ current_sources = search_upwards_for_airflow_sources_root(Path.cwd()) if current_sources is None: current_sources = get_installation_airflow_sources() if current_sources is None: warn_non_editable() sys.exit(1) return current_sources
Find the root of airflow sources we operate on. Handle the case when Breeze is installed via `pipx` from a different source tree, so it searches upwards from the current directory to find the right root of the airflow directory we are actually in. This **might** be different than the sources of Airflow that Breeze was installed from. If not found, we operate on the Airflow sources that Breeze was installed from. This handles the case when we run Breeze from a "random" directory. This method also handles the following errors and warnings: * It fails (and exits hard) if Breeze is installed in non-editable mode (in which case it will not find the Airflow sources when walking upwards from the directory where it is installed) * It warns (with 2 seconds timeout) if you are using Breeze from different airflow sources than the ones you operate on. * If we are running in the same source tree as where Breeze was installed from (so no warning above), it warns (with 2 seconds timeout) if there is a change in setup.* files of Breeze since installation time. In such a case the user is encouraged to re-install Breeze to update dependencies. :return: Path for the found sources.
def find_airflow_sources_root_to_operate_on() -> Path: """ Find the root of airflow sources we operate on. Handle the case when Breeze is installed via `pipx` from a different source tree, so it searches upwards of the current directory to find the right root of airflow directory we are actually in. This **might** be different than the sources of Airflow Breeze was installed from. If not found, we operate on Airflow sources that we were installed it. This handles the case when we run Breeze from a "random" directory. This method also handles the following errors and warnings: * It fails (and exits hard) if Breeze is installed in non-editable mode (in which case it will not find the Airflow sources when walking upwards the directory where it is installed) * It warns (with 2 seconds timeout) if you are using Breeze from a different airflow sources than the one you operate on. * If we are running in the same source tree as where Breeze was installed from (so no warning above), it warns (with 2 seconds timeout) if there is a change in setup.* files of Breeze since installation time. In such case usesr is encouraged to re-install Breeze to update dependencies. :return: Path for the found sources. """ sources_root_from_env = os.getenv("AIRFLOW_SOURCES_ROOT", None) if sources_root_from_env: return Path(sources_root_from_env) installation_airflow_sources = get_installation_airflow_sources() if installation_airflow_sources is None and not skip_breeze_self_upgrade_check(): get_console().print( "\n[error]Breeze should only be installed with -e flag[/]\n\n" "[warning]Please go to Airflow sources and run[/]\n\n" f" {NAME} setup self-upgrade --use-current-airflow-sources\n" '[warning]If during installation you see warning starting "Ignoring --editable install",[/]\n' '[warning]make sure you first downgrade "packaging" package to <23.2, for example by:[/]\n\n' f' pip install "packaging<23.2"\n\n' ) sys.exit(1) airflow_sources = get_used_airflow_sources() if not skip_breeze_self_upgrade_check(): # only print warning and sleep if not producing complete results reinstall_if_different_sources(airflow_sources) reinstall_if_setup_changed() os.chdir(airflow_sources.as_posix()) airflow_home_dir = Path(os.environ.get("AIRFLOW_HOME", (Path.home() / "airflow").resolve().as_posix())) if airflow_sources.resolve() == airflow_home_dir.resolve(): get_console().print( f"\n[error]Your Airflow sources are checked out in {airflow_home_dir} which " f"is your also your AIRFLOW_HOME where airflow writes logs and database. \n" f"This is a bad idea because Airflow might override and cleanup your checked out " f"sources and .git repository.[/]\n" ) get_console().print("\nPlease check out your Airflow code elsewhere.\n") sys.exit(1) return airflow_sources
Creates all directories and files that are needed for Breeze to work via docker-compose. Checks if setup has been updated since the last time and proposes to upgrade if so.
def create_directories_and_files() -> None:
    """
    Creates all directories and files that are needed for Breeze to work via docker-compose.
    Checks if setup has been updated since the last time and proposes to upgrade if so.
    """
    BUILD_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    DAGS_DIR.mkdir(parents=True, exist_ok=True)
    FILES_DIR.mkdir(parents=True, exist_ok=True)
    HOOKS_DIR.mkdir(parents=True, exist_ok=True)
    KUBE_DIR.mkdir(parents=True, exist_ok=True)
    LOGS_DIR.mkdir(parents=True, exist_ok=True)
    DIST_DIR.mkdir(parents=True, exist_ok=True)
    OUTPUT_LOG.mkdir(parents=True, exist_ok=True)
    (AIRFLOW_SOURCES_ROOT / ".bash_aliases").touch()
    (AIRFLOW_SOURCES_ROOT / ".bash_history").touch()
    (AIRFLOW_SOURCES_ROOT / ".inputrc").touch()
Replace different variants of the provided platform with the two canonical ones we are using: amd64 and arm64.
def get_real_platform(single_platform: str) -> str:
    """
    Replace different variants of the provided platform with the two canonical ones we are
    using: amd64 and arm64.
    """
    return single_platform.replace("x86_64", "amd64").replace("aarch64", "arm64").replace("/", "-")
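A quick self-contained illustration of the normalization above; the platform strings below are just examples of the docker-style values that may be passed in:

def _normalize_platform(platform_string: str) -> str:
    # Mirrors get_real_platform(): canonicalize CPU architecture aliases and
    # turn the docker-style "os/arch" separator into a dash.
    return platform_string.replace("x86_64", "amd64").replace("aarch64", "arm64").replace("/", "-")


assert _normalize_platform("linux/x86_64") == "linux-amd64"
assert _normalize_platform("linux/aarch64") == "linux-arm64"
assert _normalize_platform("linux/amd64") == "linux-amd64"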
Check if the current platform is WSL2. This method will exit with error printing appropriate message if WSL1 is detected as WSL1 is not supported. :return: True if the current platform is WSL2, False otherwise (unless it's WSL1 then it exits).
def is_wsl2() -> bool: """ Check if the current platform is WSL2. This method will exit with error printing appropriate message if WSL1 is detected as WSL1 is not supported. :return: True if the current platform is WSL2, False otherwise (unless it's WSL1 then it exits). """ if not sys.platform.startswith("linux"): return False release_name = platform.uname().release has_wsl_interop = _exists_no_permission_error("/proc/sys/fs/binfmt_misc/WSLInterop") microsoft_in_release = "microsoft" in release_name.lower() wsl_conf = _exists_no_permission_error("/etc/wsl.conf") if not has_wsl_interop and not microsoft_in_release and not wsl_conf: return False if microsoft_in_release: # Release name WSL1 detection if "Microsoft" in release_name: message_on_wsl1_detected(release_name=release_name, kernel_version=None) sys.exit(1) return True # Kernel WSL1 detection kernel_version: tuple[int, ...] = (0, 0) if len(parts := release_name.split(".", 2)[:2]) == 2: try: kernel_version = tuple(map(int, parts)) except (TypeError, ValueError): pass if kernel_version < (4, 19): message_on_wsl1_detected(release_name=None, kernel_version=kernel_version) sys.exit(1) return True
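A worked example of the kernel-version check above; the release string is a typical, purely illustrative WSL2 kernel name, not taken from this repository:

release_name = "5.15.153.1-microsoft-standard-WSL2"  # illustrative WSL2 kernel release
parts = release_name.split(".", 2)[:2]
kernel_version = tuple(map(int, parts))
assert kernel_version == (5, 15)
# WSL1 reports a kernel older than 4.19, so this release would be treated as WSL2.
assert kernel_version >= (4, 19)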
Gets cross dependencies of a provider. :param provider_to_check: id of the provider to check :param upstream_dependencies: whether to include providers that depend on it :param downstream_dependencies: whether to include providers it depends on :return: set of dependent provider ids
def get_related_providers( provider_to_check: str, upstream_dependencies: bool, downstream_dependencies: bool, ) -> set[str]: """ Gets cross dependencies of a provider. :param provider_to_check: id of the provider to check :param upstream_dependencies: whether to include providers that depend on it :param downstream_dependencies: whether to include providers it depends on :return: set of dependent provider ids """ if not upstream_dependencies and not downstream_dependencies: raise ValueError("At least one of upstream_dependencies or downstream_dependencies must be True") related_providers = set() if upstream_dependencies: # Providers that use this provider for provider, provider_info in DEPENDENCIES.items(): if provider_to_check in provider_info["cross-providers-deps"]: related_providers.add(provider) # and providers we use directly if downstream_dependencies: for dep_name in DEPENDENCIES[provider_to_check]["cross-providers-deps"]: related_providers.add(dep_name) return related_providers
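A minimal sketch of how the upstream/downstream lookup behaves. The dependency map below is a hypothetical stand-in for the real DEPENDENCIES structure; only the `cross-providers-deps` key matters for this logic:

SAMPLE_DEPENDENCIES = {
    "amazon": {"cross-providers-deps": ["common.sql", "http"]},
    "google": {"cross-providers-deps": ["common.sql"]},
    "common.sql": {"cross-providers-deps": []},
    "http": {"cross-providers-deps": []},
}


def related(provider: str, upstream: bool, downstream: bool) -> set[str]:
    # Same logic as get_related_providers(), but run against the sample map above.
    result: set[str] = set()
    if upstream:
        result |= {p for p, info in SAMPLE_DEPENDENCIES.items() if provider in info["cross-providers-deps"]}
    if downstream:
        result |= set(SAMPLE_DEPENDENCIES[provider]["cross-providers-deps"])
    return result


assert related("common.sql", upstream=True, downstream=False) == {"amazon", "google"}
assert related("amazon", upstream=False, downstream=True) == {"common.sql", "http"}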
Returns list of provider.yaml files
def get_provider_yaml_paths(): """Returns list of provider.yaml files""" return sorted(glob(f"{ROOT_DIR}/airflow/providers/**/provider.yaml", recursive=True))
Formats path nicely.
def pretty_format_path(path: str, start: str) -> str: """Formats path nicely.""" relpath = os.path.relpath(path, start) if relpath == path: return path return f"{start}/{relpath}"
Prepare code snippet with line numbers and a specific line marked. :param file_path: File name :param line_no: Line number :param context_lines_count: The number of lines that will be cut before and after. :return: str
def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str: """ Prepare code snippet with line numbers and a specific line marked. :param file_path: File name :param line_no: Line number :param context_lines_count: The number of lines that will be cut before and after. :return: str """ with open(file_path) as text_file: # Highlight code code = text_file.read() code_lines = code.splitlines() # Prepend line number code_lines = [ f">{lno:3} | {line}" if line_no == lno else f"{lno:4} | {line}" for lno, line in enumerate(code_lines, 1) ] # # Cut out the snippet start_line_no = max(0, line_no - context_lines_count - 1) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code = "\n".join(code_lines) return code
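A short usage sketch for the snippet formatter above (assuming prepare_code_snippet is in scope); the temporary file and its contents are made up for illustration, and the commented output shows the expected shape, with the marked line prefixed by ">":

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write("\n".join(f"line {i}" for i in range(1, 11)))

print(prepare_code_snippet(tmp.name, line_no=5, context_lines_count=2))
# Expected shape of the output:
#    3 | line 3
#    4 | line 4
# >  5 | line 5
#    6 | line 6
#    7 | line 7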
Retrieve and validate space-separated list of Python versions and return them in the form of list. :param python_versions: space separated list of Python versions :return: List of python versions
def get_python_version_list(python_versions: str) -> list[str]: """ Retrieve and validate space-separated list of Python versions and return them in the form of list. :param python_versions: space separated list of Python versions :return: List of python versions """ python_version_list = python_versions.split(" ") errors = False for python in python_version_list: if python not in ALLOWED_PYTHON_MAJOR_MINOR_VERSIONS: get_console().print( f"[error]The Python version {python} passed in {python_versions} is wrong.[/]" ) errors = True if errors: get_console().print( f"\nSome of the Python versions passed are not in the " f"list: {ALLOWED_PYTHON_MAJOR_MINOR_VERSIONS}. Quitting.\n" ) sys.exit(1) return python_version_list
Reinstalls Breeze from specified sources. :param breeze_sources: Sources where to install Breeze from. :param re_run: whether to re-run the original command that breeze was run with.
def reinstall_breeze(breeze_sources: Path, re_run: bool = True):
    """
    Reinstalls Breeze from specified sources.

    :param breeze_sources: Sources where to install Breeze from.
    :param re_run: whether to re-run the original command that breeze was run with.
    """
    # Note that we cannot use `pipx upgrade` here because we sometimes install Breeze from
    # different sources than it was originally installed from (i.e. when we reinstall it
    # from the current Airflow sources).
    get_console().print(f"\n[info]Reinstalling Breeze from {breeze_sources}\n")
    subprocess.check_call(["pipx", "install", "-e", str(breeze_sources), "--force"])
    if re_run:
        # Make sure we don't loop forever if the metadata hash hasn't been updated yet (else it is tricky to
        # run pre-commit checks via breeze!)
        os.environ["SKIP_BREEZE_SELF_UPGRADE_CHECK"] = "true"
        os.execl(sys.executable, sys.executable, *sys.argv)
    get_console().print(f"\n[info]Breeze has been reinstalled from {breeze_sources}. Exiting now.[/]\n\n")
    sys.exit(0)
Context manager for changing the current working directory
def cd(new_path: Path): """Context manager for changing the current working directory""" previous_path = os.getcwd() try: os.chdir(new_path.as_posix()) yield finally: os.chdir(previous_path)
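As used elsewhere in Breeze, `cd` is meant to be driven by a `with` block; in the full source it is presumably decorated with `contextlib.contextmanager` (the decorator is not shown in this excerpt). A self-contained sketch with the decorator spelled out:

import os
from contextlib import contextmanager
from pathlib import Path


@contextmanager
def cd_sketch(new_path: Path):
    # Standalone equivalent of cd() above.
    previous_path = os.getcwd()
    try:
        os.chdir(new_path.as_posix())
        yield
    finally:
        os.chdir(previous_path)


before = os.getcwd()
with cd_sketch(Path("/tmp")):
    assert os.getcwd() == os.path.realpath("/tmp")
assert os.getcwd() == before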
Context manager for changing the current locale
def setlocale(name: str): """Context manager for changing the current locale""" saved_locale = locale.setlocale(locale.LC_ALL) try: yield locale.setlocale(locale.LC_ALL, name) finally: locale.setlocale(locale.LC_ALL, saved_locale)
Repack a .tar.gz archive in a deterministic (reproducible) manner. See https://reproducible-builds.org/docs/archives/ for more details.
def repack_deterministically( source_archive: Path, dest_archive: Path, prepend_path=None, timestamp=0 ) -> CompletedProcess | CalledProcessError: """Repack a .tar.gz archive in a deterministic (reproducible) manner. See https://reproducible-builds.org/docs/archives/ for more details.""" def reset(tarinfo): """Helper to reset owner/group and modification time for tar entries""" tarinfo.uid = tarinfo.gid = 0 tarinfo.uname = tarinfo.gname = "root" tarinfo.mtime = timestamp return tarinfo check_python_version() OUT_DIR.mkdir(exist_ok=True) shutil.rmtree(REPRODUCIBLE_DIR, ignore_errors=True) REPRODUCIBLE_DIR.mkdir(exist_ok=True) result = run_command( [ "tar", "-xf", source_archive.as_posix(), "-C", REPRODUCIBLE_DIR.as_posix(), ], check=False, ) if result.returncode != 0: return result dest_archive.unlink(missing_ok=True) result = run_command( [ "chmod", "-R", "go=", REPRODUCIBLE_DIR.as_posix(), ], check=False, ) with cd(REPRODUCIBLE_DIR): current_dir = "." file_list = [current_dir] for root, dirs, files in os.walk(current_dir): for name in itertools.chain(dirs, files): file_list.append(os.path.join(root, name)) # Sort file entries with the fixed locale with setlocale("C"): file_list.sort(key=locale.strxfrm) # Use a temporary file and atomic rename to avoid partially-formed # packaging (in case of exceptional situations like running out of disk space). temp_file = f"{dest_archive}.temp~" with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), "wb") as out_file: with gzip.GzipFile(fileobj=out_file, mtime=0, mode="wb") as gzip_file: with tarfile.open(fileobj=gzip_file, mode="w:") as tar_file: # type: ignore for entry in file_list: arcname = entry if prepend_path is not None: arcname = os.path.normpath(os.path.join(prepend_path, arcname)) if arcname == ".": continue if arcname.startswith("./"): arcname = arcname[2:] tar_file.add(entry, filter=reset, recursive=False, arcname=arcname) os.rename(temp_file, dest_archive) return result
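The reproducibility of the archive hinges on three things visible above: fixed tar entry metadata, a fixed gzip mtime, and a stable file ordering. A minimal standalone sketch of the same idea (the archive path and file names are hypothetical):

import gzip
import tarfile


def add_files_reproducibly(archive_path: str, files: list[str]) -> None:
    def reset(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
        # Normalize ownership and timestamps, as reset() does above.
        tarinfo.uid = tarinfo.gid = 0
        tarinfo.uname = tarinfo.gname = "root"
        tarinfo.mtime = 0
        return tarinfo

    with open(archive_path, "wb") as out_file:
        # mtime=0 keeps the gzip header identical between runs.
        with gzip.GzipFile(fileobj=out_file, mtime=0, mode="wb") as gz:
            with tarfile.open(fileobj=gz, mode="w:") as tar:
                for name in sorted(files):  # stable ordering
                    tar.add(name, filter=reset, recursive=False)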
Runs command passed as a list of strings with some extra functionality over subprocess.run (kwargs accepted by subprocess.run can be used here even if not explicitly specified). It prints diagnostics when requested and also allows "dry-running" the commands rather than actually executing them. An important feature of this command-running tool is that you can (in verbose mode) directly copy&paste the verbose output and run the command manually - including all the environment variables needed to run the command. :param cmd: command to run :param title: optional title for the command (otherwise a likely title is automatically determined) :param check: whether to check the exit status and raise an exception on failure (same as subprocess.run) :param no_output_dump_on_exception: whether to suppress printing logs from output when command fails :param env: mapping of environment variables to set for the run command :param cwd: working directory to set for the command :param input: input string to pass to stdin of the process :param output: redirects stderr/stdout to Output if set to an Output instance. :param output_outside_the_group: if this is set to True, then output of the command will be printed outside the "CI folded group" in CI - so that it is immediately visible without unfolding. :param verbose_override: override verbose parameter with the one specified if not None. :param dry_run_override: override dry_run parameter with the one specified if not None. :param kwargs: kwargs passed to subprocess.run
def run_command( cmd: list[str] | str, title: str | None = None, *, check: bool = True, no_output_dump_on_exception: bool = False, env: Mapping[str, str] | None = None, cwd: Path | str | None = None, input: str | None = None, output: Output | None = None, output_outside_the_group: bool = False, verbose_override: bool | None = None, dry_run_override: bool | None = None, **kwargs, ) -> RunCommandResult: """ Runs command passed as list of strings with some extra functionality over POpen (kwargs from PoPen can be used in this command even if not explicitly specified). It prints diagnostics when requested, also allows to "dry_run" the commands rather than actually execute them. An important factor for having this command running tool is to be able (in verbose mode) to directly copy&paste the verbose output and run the command manually - including all the environment variables needed to run the command. :param cmd: command to run :param title: optional title for the command (otherwise likely title is automatically determined) :param check: whether to check status value and run exception (same as POpem) :param no_output_dump_on_exception: whether to suppress printing logs from output when command fails :param env: mapping of environment variables to set for the run command :param cwd: working directory to set for the command :param input: input string to pass to stdin of the process :param output: redirects stderr/stdout to Output if set to Output class. :param output_outside_the_group: if this is set to True, then output of the command will be done outside the "CI folded group" in CI - so that it is immediately visible without unfolding. :param verbose_override: override verbose parameter with the one specified if not None. :param dry_run_override: override dry_run parameter with the one specified if not None. 
:param kwargs: kwargs passed to POpen """ def exclude_command(_index: int, _arg: str) -> bool: if _index == 0: # First argument is always passed return False if _arg.startswith("-"): return True if not _arg: return True if _arg.startswith("/"): # Skip any absolute paths return True if _arg == "never": return True if OPTION_MATCHER.match(_arg): return True return False def shorten_command(_index: int, _argument: str) -> str: if _argument.startswith("/"): _argument = _argument.split("/")[-1] return shlex.quote(_argument) if not title: shortened_command = [ shorten_command(index, argument) for index, argument in enumerate(cmd if isinstance(cmd, list) else shlex.split(cmd)) if not exclude_command(index, argument) ] # Heuristics to get a (possibly) short but explanatory title showing what the command does # If title is not provided explicitly title = "<" + " ".join(shortened_command[:5]) + ">" # max 4 args workdir: str = str(cwd) if cwd else os.getcwd() cmd_env = os.environ.copy() cmd_env.setdefault("HOME", str(Path.home())) if env: cmd_env.update(env) if output: if "capture_output" not in kwargs or not kwargs["capture_output"]: kwargs["stdout"] = output.file kwargs["stderr"] = subprocess.STDOUT command_to_print = " ".join(shlex.quote(c) for c in cmd) if isinstance(cmd, list) else cmd env_to_print = get_environments_to_print(env) if not get_verbose(verbose_override) and not get_dry_run(dry_run_override): return subprocess.run(cmd, input=input, check=check, env=cmd_env, cwd=workdir, **kwargs) with ci_group(title=f"Running command: {title}", message_type=None): get_console(output=output).print(f"\n[info]Working directory {workdir}\n") if input: get_console(output=output).print("[info]Input:") get_console(output=output).print(input) get_console(output=output).print() # Soft wrap allows to copy&paste and run resulting output as it has no hard EOL get_console(output=output).print( f"\n[info]{env_to_print}{escape(command_to_print)}[/]\n", soft_wrap=True ) if get_dry_run(dry_run_override): return subprocess.CompletedProcess(cmd, returncode=0, stdout="", stderr="") try: if output_outside_the_group and os.environ.get("GITHUB_ACTIONS") == "true": get_console().print("::endgroup::") return subprocess.run(cmd, input=input, check=check, env=cmd_env, cwd=workdir, **kwargs) except subprocess.CalledProcessError as ex: if no_output_dump_on_exception: if check: raise return ex if ex.stdout: get_console(output=output).print( "[info]========================= OUTPUT start ============================[/]" ) get_console(output=output).print(ex.stdout) get_console(output=output).print( "[info]========================= OUTPUT end ==============================[/]" ) if ex.stderr: get_console(output=output).print( "[error]========================= STDERR start ============================[/]" ) get_console(output=output).print(ex.stderr) get_console(output=output).print( "[error]========================= STDERR end ==============================[/]" ) if check: raise return ex
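A couple of illustrative call patterns for run_command (the echoed string and exit code are arbitrary examples; the calls assume the function defined above is in scope):

# Capture output of a successful command.
result = run_command(["echo", "hello"], capture_output=True, text=True)
assert result.stdout == "hello\n"

# With check=False a failing command does not raise; the caller can branch on returncode.
result = run_command(["bash", "-c", "exit 3"], check=False)
assert result.returncode == 3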
Check if pre-commit is installed in the right version. :return: True if pre-commit is installed in the right version.
def assert_pre_commit_installed():
    """
    Check if pre-commit is installed in the right version.

    :return: True if pre-commit is installed in the right version.
    """
    # Local import to make autocomplete work
    import yaml
    from packaging.version import Version

    pre_commit_config = yaml.safe_load((AIRFLOW_SOURCES_ROOT / ".pre-commit-config.yaml").read_text())
    min_pre_commit_version = pre_commit_config["minimum_pre_commit_version"]

    python_executable = sys.executable
    get_console().print(f"[info]Checking pre-commit installed for {python_executable}[/]")
    command_result = run_command(
        [python_executable, "-m", "pre_commit", "--version"],
        capture_output=True,
        text=True,
        check=False,
    )
    if command_result.returncode == 0:
        if command_result.stdout:
            pre_commit_version = command_result.stdout.split(" ")[-1].strip()
            if Version(pre_commit_version) >= Version(min_pre_commit_version):
                get_console().print(
                    f"\n[success]Package pre_commit is installed. "
                    f"Good version {pre_commit_version} (>= {min_pre_commit_version})[/]\n"
                )
            else:
                get_console().print(
                    f"\n[error]Package pre_commit version is wrong. It should be "
                    f"at least {min_pre_commit_version} and is {pre_commit_version}.[/]\n\n"
                )
                sys.exit(1)
        else:
            get_console().print(
                "\n[warning]Could not determine version of pre-commit. You might need to update it![/]\n"
            )
    else:
        get_console().print("\n[error]Error checking for pre-commit installation:[/]\n")
        get_console().print(command_result.stderr)
        get_console().print("\nMake sure to run:\n breeze setup self-upgrade\n\n")
        sys.exit(1)
Determine the type of filesystem used - we might want to use different parameters if tmpfs is used. :param filepath: path to check :return: type of filesystem
def get_filesystem_type(filepath: str): """ Determine the type of filesystem used - we might want to use different parameters if tmpfs is used. :param filepath: path to check :return: type of filesystem """ # We import it locally so that click autocomplete works try: import psutil except ImportError: return "unknown" root_type = "unknown" for part in psutil.disk_partitions(all=True): if part.mountpoint == "/": root_type = part.fstype elif filepath.startswith(part.mountpoint): return part.fstype return root_type
Print instructions to the user that they should build the image
def instruct_build_image(python: str): """Print instructions to the user that they should build the image""" get_console().print(f"[warning]\nThe CI image for Python version {python} may be outdated[/]\n") get_console().print( f"\n[info]Please run at the earliest " f"convenience:[/]\n\nbreeze ci-image build --python {python}\n\n" )
Update file permissions to not be group-writeable. Needed to solve cache invalidation problems.
def change_file_permission(file_to_fix: Path): """Update file permissions to not be group-writeable. Needed to solve cache invalidation problems.""" if file_to_fix.exists(): current = stat.S_IMODE(os.stat(file_to_fix).st_mode) new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission os.chmod(file_to_fix, new)
Update directory permissions to not be group-writeable. Needed to solve cache invalidation problems.
def change_directory_permission(directory_to_fix: Path): """Update directory permissions to not be group-writeable. Needed to solve cache invalidation problems.""" if directory_to_fix.exists(): current = stat.S_IMODE(os.stat(directory_to_fix).st_mode) new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission new = ( new | stat.S_IXGRP | stat.S_IXOTH ) # Add group/other execute permission (to be able to list directories) os.chmod(directory_to_fix, new)
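A worked example of the permission bit arithmetic used by the two helpers above, starting from a group-writable 0o775 directory mode:

import stat

mode = 0o775  # rwxrwxr-x
# Drop group/other write bits (what both helpers do).
without_write = mode & ~stat.S_IWGRP & ~stat.S_IWOTH
assert without_write == 0o755
# Re-add group/other execute bits (directory variant); a no-op here because
# the execute bits were already set.
with_execute = without_write | stat.S_IXGRP | stat.S_IXOTH
assert with_execute == 0o755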
Fixes permissions of all the files and directories that have group-write access.
def fix_group_permissions(): """Fixes permissions of all the files and directories that have group-write access.""" if get_verbose(): get_console().print("[info]Fixing group permissions[/]") files_to_fix_result = run_command(["git", "ls-files", "./"], capture_output=True, text=True) if files_to_fix_result.returncode == 0: files_to_fix = files_to_fix_result.stdout.strip().splitlines() for file_to_fix in files_to_fix: change_file_permission(Path(file_to_fix)) directories_to_fix_result = run_command( ["git", "ls-tree", "-r", "-d", "--name-only", "HEAD"], capture_output=True, text=True ) if directories_to_fix_result.returncode == 0: directories_to_fix = directories_to_fix_result.stdout.strip().splitlines() for directory_to_fix in directories_to_fix: change_directory_permission(Path(directory_to_fix))
Returns True if the local branch contains the latest remote SHA (i.e. if it is rebased)
def is_repo_rebased(repo: str, branch: str): """Returns True if the local branch contains the latest remote SHA (i.e. if it is rebased)""" # We import it locally so that click autocomplete works import requests gh_url = f"https://api.github.com/repos/{repo}/commits/{branch}" headers_dict = {"Accept": "application/vnd.github.VERSION.sha"} latest_sha = requests.get(gh_url, headers=headers_dict).text.strip() rebased = False command_result = run_command(["git", "log", "--format=format:%H"], capture_output=True, text=True) commit_list = command_result.stdout.strip().splitlines() if command_result is not None else "missing" if latest_sha in commit_list: rebased = True return rebased
Checks if buildx plugin is locally available. :return: True if the buildx plugin is installed.
def check_if_buildx_plugin_installed() -> bool: """ Checks if buildx plugin is locally available. :return True if the buildx plugin is installed. """ check_buildx = ["docker", "buildx", "version"] docker_buildx_version_result = run_command( check_buildx, no_output_dump_on_exception=True, capture_output=True, text=True, check=False, ) if docker_buildx_version_result.returncode == 0: return True return False
Returns commit SHA of current repo. Cached for various usages.
def commit_sha(): """Returns commit SHA of current repo. Cached for various usages.""" command_result = run_command(["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=False) if command_result.stdout: return command_result.stdout.strip() else: return "COMMIT_SHA_NOT_FOUND"
Kills all processes in the process group and ignores the error if the group is missing. :param gid: process group id
def kill_process_group(gid: int): """ Kills all processes in the process group and ignore if the group is missing. :param gid: process group id """ try: os.killpg(gid, signal.SIGTERM) except OSError: pass
Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of SpellingError.
def parse_spelling_warnings(warning_text: str, docs_dir: str) -> list[SpellingError]: """ Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of SpellingError. """ sphinx_spelling_errors = [] for sphinx_warning in warning_text.splitlines(): if not sphinx_warning: continue warning_parts = None match = re.search(r"(.*):(\w*):\s\((\w*)\)\s?(\w*)\s?(.*)", sphinx_warning) if match: warning_parts = match.groups() if warning_parts and len(warning_parts) == 5: try: sphinx_spelling_errors.append( SpellingError( file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]) if warning_parts[1] not in ("None", "") else None, spelling=warning_parts[2], suggestion=warning_parts[3] if warning_parts[3] else None, context_line=warning_parts[4], message=sphinx_warning, ) ) except Exception: # If an exception occurred while parsing the warning message, display the raw warning message. sphinx_spelling_errors.append( SpellingError( file_path=None, line_no=None, spelling=None, suggestion=None, context_line=None, message=sphinx_warning, ) ) else: sphinx_spelling_errors.append( SpellingError( file_path=None, line_no=None, spelling=None, suggestion=None, context_line=None, message=sphinx_warning, ) ) return sphinx_spelling_errors
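To make the regex above concrete, here is a made-up Sphinx spelling warning in the expected "<path>:<line>: (<misspelling>) <suggestion> <context>" shape and how it is decomposed:

import re

sample = "apache-airflow/howto/connection.rst:42: (conection) connection Set up a conection to the db"
match = re.search(r"(.*):(\w*):\s\((\w*)\)\s?(\w*)\s?(.*)", sample)
assert match is not None
path, line_no, spelling, suggestion, context = match.groups()
assert path.endswith("connection.rst")
assert (line_no, spelling, suggestion) == ("42", "conection", "connection")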
Displays summary of Spelling errors
def display_spelling_error_summary(spelling_errors: dict[str, list[SpellingError]]) -> None:
    """Displays summary of Spelling errors"""
    console.print()
    console.print("[red]" + "#" * 30 + " Start spelling errors summary " + "#" * 30 + "[/]")
    console.print()

    for package_name, errors in sorted(spelling_errors.items()):
        if package_name:
            console.print("=" * 30, f" [info]{package_name}[/] ", "=" * 30)
        else:
            console.print("=" * 30, " [info]General[/] ", "=" * 30)

        for warning_no, error in enumerate(sorted(errors), 1):
            console.print("-" * 30, f"Error {warning_no:3}", "-" * 30)
            _display_error(error)

    console.print("=" * 100)
    console.print()
    msg = """
If there are spelling errors in the summary above, and the spelling is correct,
add the spelling to docs/spelling_wordlist.txt or use the spelling directive.
Check https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#private-dictionaries
for more details.

If there are no spelling errors in the summary above, there might be an issue unrelated to spelling.
Please review the traceback.
    """
    console.print(msg)
    console.print()
    console.print()
    console.print("[red]" + "#" * 30 + " End docs build errors summary " + "#" * 30 + "[/]")
    console.print()
Get the HTTP timeout to use with uv. We do not want to change the default value to not slow down the --help and command line in general, and also it might be useful to give an escape hatch in case our WSL1 detection is wrong (it will fail the default --use-uv build, but you will be able to skip the check by manually specifying --uv-http-timeout or --no-use-uv). So we only check for wsl2 when the default value is used and when uv is enabled.
def get_uv_timeout(build_params: CommonBuildParams) -> int: """ Get the timeout for the uvicorn server. We do not want to change the default value to not slow down the --help and command line in general and also it might be useful to give escape hatch in case our WSL1 detection is wrong (it will fail default --use-uv build, but you will be able to skip the check by manually specifying --uv-http-timeout or --no-use-uv). So we only check for wsl2 when default value is used and when uv is enabled. """ if build_params.uv_http_timeout != DEFAULT_UV_HTTP_TIMEOUT: # a bit of hack: if you specify 300 in command line it will also be overridden in case of WSL2 # but this is a corner case return build_params.uv_http_timeout if is_wsl2(): return DEFAULT_WSL2_HTTP_TIMEOUT return build_params.uv_http_timeout
Strips leading zeros from version number. This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted by PIP versioning. :param version: version number in CALVER format (potentially with leading 0s in date and month) :return: string with leading 0s after dot replaced.
def strip_leading_zeros_from_version(version: str) -> str: """ Strips leading zeros from version number. This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted by PIP versioning. :param version: version number in CALVER format (potentially with leading 0s in date and month) :return: string with leading 0s after dot replaced. """ return ".".join(str(int(i)) for i in version.split("."))
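For illustration, the same one-liner shown standalone with a couple of CALVER inputs:

def _strip_leading_zeros(version: str) -> str:
    return ".".join(str(int(i)) for i in version.split("."))


assert _strip_leading_zeros("1974.04.03") == "1974.4.3"
assert _strip_leading_zeros("2024.10.01") == "2024.10.1"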
Test simple automated classification of the changes based on their single-line description.
def test_classify_changes_automatically( descriptions: list[str], with_breaking_changes: bool, maybe_with_new_features: bool, breaking_count: int, feature_count: int, bugfix_count: int, other_count: int, ): """Test simple automated classification of the changes based on their single-line description.""" changes = [ _get_change_from_line(f"LONG SHORT 2023-12-01 {description}", version="0.1.0") for description in descriptions ] classified_changes = _get_changes_classified( changes, with_breaking_changes=with_breaking_changes, maybe_with_new_features=maybe_with_new_features ) assert len(classified_changes.breaking_changes) == breaking_count assert len(classified_changes.features) == feature_count assert len(classified_changes.fixes) == bugfix_count assert len(classified_changes.other) == other_count
Mypy plugin entrypoint.
def plugin(version: str): """Mypy plugin entrypoint.""" return TypedDecoratorPlugin
Create and return the executor class under test, wrapped with ShortCircuitExecutorMixin.
def get_executor_under_test(dotted_path): """ Create and return a MockExecutor """ from airflow.executors.executor_loader import ExecutorLoader if dotted_path == "MockExecutor": from tests.test_utils.mock_executor import MockExecutor as executor else: executor = ExecutorLoader.load_executor(dotted_path) executor_cls = type(executor) # Change this to try other executors class ShortCircuitExecutor(ShortCircuitExecutorMixin, executor_cls): """ Placeholder class that implements the inheritance hierarchy """ job_runner = None return ShortCircuitExecutor
Delete all dag and task instances and then un_pause the Dag.
def reset_dag(dag, session): """ Delete all dag and task instances and then un_pause the Dag. """ import airflow.models DR = airflow.models.DagRun DM = airflow.models.DagModel TI = airflow.models.TaskInstance TF = airflow.models.TaskFail dag_id = dag.dag_id session.query(DM).filter(DM.dag_id == dag_id).update({"is_paused": False}) session.query(DR).filter(DR.dag_id == dag_id).delete() session.query(TI).filter(TI.dag_id == dag_id).delete() session.query(TF).filter(TF.dag_id == dag_id).delete()
Pause all Dags
def pause_all_dags(session): """ Pause all Dags """ from airflow.models.dag import DagModel session.query(DagModel).update({"is_paused": True})
Create `num_runs` of dag runs for subsequent schedules
def create_dag_runs(dag, num_runs, session): """ Create `num_runs` of dag runs for sub-sequent schedules """ from airflow.utils import timezone from airflow.utils.state import DagRunState try: from airflow.utils.types import DagRunType id_prefix = f"{DagRunType.SCHEDULED.value}__" except ImportError: from airflow.models.dagrun import DagRun id_prefix = DagRun.ID_PREFIX last_dagrun_data_interval = None for _ in range(num_runs): next_info = dag.next_dagrun_info(last_dagrun_data_interval) logical_date = next_info.logical_date dag.create_dagrun( run_id=f"{id_prefix}{logical_date.isoformat()}", execution_date=logical_date, start_date=timezone.utcnow(), state=DagRunState.RUNNING, external_trigger=False, session=session, ) last_dagrun_data_interval = next_info.data_interval
This script can be used to measure the total "scheduler overhead" of Airflow. By overhead we mean: if the tasks executed instantly as soon as they are scheduled (i.e. they do nothing), how quickly could we schedule them. It will monitor the task completion of the Mock/stub executor (no actual tasks are run) and after the required number of dag runs for all the specified dags have completed all their tasks, it will cleanly shut down the scheduler. The dags you run with need to have an early enough start_date to create the desired number of runs. Care should be taken that other limits (DAG max_active_tasks, pool size etc.) are not the bottleneck. This script doesn't help you in that regard. It is recommended to repeat the test at least 3 times (`--repeat=3`, the default) so that you can get somewhat-accurate variance on the reported timing numbers, but this can be disabled for longer runs if needed.
def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids): """ This script can be used to measure the total "scheduler overhead" of Airflow. By overhead we mean if the tasks executed instantly as soon as they are executed (i.e. they do nothing) how quickly could we schedule them. It will monitor the task completion of the Mock/stub executor (no actual tasks are run) and after the required number of dag runs for all the specified dags have completed all their tasks, it will cleanly shut down the scheduler. The dags you run with need to have an early enough start_date to create the desired number of runs. Care should be taken that other limits (DAG max_active_tasks, pool size etc) are not the bottleneck. This script doesn't help you in that regard. It is recommended to repeat the test at least 3 times (`--repeat=3`, the default) so that you can get somewhat-accurate variance on the reported timing numbers, but this can be disabled for longer runs if needed. """ # Turn on unit test mode so that we don't do any sleep() in the scheduler # loop - not needed on main, but this script can run against older # releases too! os.environ["AIRFLOW__CORE__UNIT_TEST_MODE"] = "True" os.environ["AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG"] = "500" # Set this so that dags can dynamically configure their end_date os.environ["AIRFLOW_BENCHMARK_MAX_DAG_RUNS"] = str(num_runs) os.environ["PERF_MAX_RUNS"] = str(num_runs) if pre_create_dag_runs: os.environ["AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE"] = "False" from airflow.jobs.job import Job from airflow.jobs.scheduler_job_runner import SchedulerJobRunner from airflow.models.dagbag import DagBag from airflow.utils import db dagbag = DagBag() dags = [] with db.create_session() as session: pause_all_dags(session) for dag_id in dag_ids: dag = dagbag.get_dag(dag_id) dag.sync_to_db(session=session) dags.append(dag) reset_dag(dag, session) next_info = dag.next_dagrun_info(None) for _ in range(num_runs - 1): next_info = dag.next_dagrun_info(next_info.data_interval) end_date = dag.end_date or dag.default_args.get("end_date") if end_date != next_info.logical_date: message = ( f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! 
" f"It should be {next_info.logical_date}" ) sys.exit(message) if pre_create_dag_runs: create_dag_runs(dag, num_runs, session) ShortCircuitExecutor = get_executor_under_test(executor_class) executor = ShortCircuitExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs) scheduler_job = Job(executor=executor) job_runner = SchedulerJobRunner(job=scheduler_job, dag_ids=dag_ids, do_pickle=False) executor.job_runner = job_runner total_tasks = sum(len(dag.tasks) for dag in dags) if "PYSPY" in os.environ: pid = str(os.getpid()) filename = os.environ.get("PYSPY_O", "flame-" + pid + ".html") os.spawnlp(os.P_NOWAIT, "sudo", "sudo", "py-spy", "record", "-o", filename, "-p", pid, "--idle") times = [] # Need a lambda to refer to the _latest_ value for scheduler_job, not just # the initial one code_to_test = lambda: run_job(job=job_runner.job, execute_callable=job_runner._execute) for count in range(repeat): if not count: with db.create_session() as session: for dag in dags: reset_dag(dag, session) executor.reset(dag_ids) scheduler_job = Job(executor=executor) job_runner = SchedulerJobRunner(job=scheduler_job, dag_ids=dag_ids, do_pickle=False) executor.scheduler_job = scheduler_job gc.disable() start = time.perf_counter() code_to_test() times.append(time.perf_counter() - start) gc.enable() print(f"Run {count + 1} time: {times[-1]:.5f}") print() print() print(f"Time for {num_runs} dag runs of {len(dags)} dags with {total_tasks} total tasks: ", end="") if len(times) > 1: print(f"{statistics.mean(times):.4f}s (±{statistics.stdev(times):.3f}s)") else: print(f"{times[0]:.4f}s") print() print()
Wrapper function that calls the airflow resetdb function.
def reset_db(): """ Wrapper function that calls the airflow resetdb function. """ from airflow.utils.db import resetdb resetdb()
Run the scheduler job, selectively resetting the db before creating a ScheduleJob instance
def run_scheduler_job(with_db_reset=False) -> None: """ Run the scheduler job, selectively resetting the db before creating a ScheduleJob instance """ from airflow.jobs.scheduler_job_runner import SchedulerJobRunner if with_db_reset: reset_db() job_runner = SchedulerJobRunner(job=Job(), subdir=DAG_FOLDER, do_pickle=False, num_runs=3) run_job(job=job_runner.job, execute_callable=job_runner._execute)
Return True, if provided line embeds a query, else False
def is_query(line: str) -> bool: """ Return True, if provided line embeds a query, else False """ return "@SQLALCHEMY" in line and "|$" in line
Returns a list of Query objects that are expected to be run during the performance run.
def make_report() -> list[Query]: """ Returns a list of Query objects that are expected to be run during the performance run. """ queries = [] with open(LOG_FILE, "r+") as f: raw_queries = [line for line in f.readlines() if is_query(line)] for query in raw_queries: time, info, stack, sql = query.replace("@SQLALCHEMY ", "").split("|$") func, file, loc = info.split(":") file_name = file.rpartition("/")[-1] queries.append( Query( function=func.strip(), file=file_name.strip(), location=int(loc.strip()), sql=sql.strip(), stack=stack.strip(), time=float(time.strip()), ) ) return queries
Run the tests inside a scheduler and then return the elapsed time along with the queries that will be run.
def run_test() -> tuple[list[Query], float]: """ Run the tests inside a scheduler and then return the elapsed time along with the queries that will be run. """ if os.path.exists(LOG_FILE): os.remove(LOG_FILE) tic = monotonic() run_scheduler_job(with_db_reset=False) toc = monotonic() queries = make_report() return queries, toc - tic
Write results stats to a file.
def rows_to_csv(rows: list[dict], name: str | None = None) -> pd.DataFrame: """ Write results stats to a file. """ df = pd.DataFrame(rows) name = name or f"/files/sql_stats_{int(monotonic())}.csv" df.to_csv(name, index=False) print(f"Saved result to {name}") return df
Run the tests and write stats to a csv file.
def main() -> None: """ Run the tests and write stats to a csv file. """ reset_db() rows = [] times = [] for test_no in range(4): sleep(5) queries, exec_time = run_test() if test_no: times.append(exec_time) for qry in queries: info = qry.to_dict() info["test_no"] = test_no rows.append(info) rows_to_csv(rows, name="/files/sql_after_remote.csv") print(times) msg = "Time for %d dag runs: %.4fs" if len(times) > 1: print((msg + " (±%.3fs)") % (len(times), statistics.mean(times), statistics.stdev(times))) else: print(msg % (len(times), times[0]))
Parse a time string e.g. (2h13m) into a timedelta object. :param time_str: A string identifying a duration. (eg. 2h13m) :return datetime.timedelta: A datetime.timedelta object
def parse_time_delta(time_str: str):
    """
    Parse a time string e.g. (2h13m) into a timedelta object.

    :param time_str: A string identifying a duration.  (eg. 2h13m)
    :return datetime.timedelta: A datetime.timedelta object
    """
    if (parts := RE_TIME_DELTA.match(time_str)) is None:
        msg = (
            f"Could not parse any time information from '{time_str}'. "
            f"Examples of valid strings: '8h', '2d8h5m20s', '2m4s'"
        )
        raise ValueError(msg)

    time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
    return timedelta(**time_params)
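RE_TIME_DELTA itself is not shown in this excerpt; the pattern below is an assumed sketch of what it needs to look like (named groups matching timedelta keyword arguments, all optional), together with the parsing step used above:

import re
from datetime import timedelta

# Assumed shape of RE_TIME_DELTA - not the actual definition from the source.
RE_TIME_DELTA_SKETCH = re.compile(
    r"^((?P<days>[\d.]+?)d)?((?P<hours>[\d.]+?)h)?((?P<minutes>[\d.]+?)m)?((?P<seconds>[\d.]+?)s)?$"
)

parts = RE_TIME_DELTA_SKETCH.match("2h13m")
assert parts is not None
time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
assert timedelta(**time_params) == timedelta(hours=2, minutes=13)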
Parse a schedule interval string e.g. (2h13m) or "@once". :param time_str: A string identifying a schedule interval. (eg. 2h13m, None, @once) :return datetime.timedelta: A datetime.timedelta object or "@once" or None
def parse_schedule_interval(time_str: str): """ Parse a schedule interval string e.g. (2h13m) or "@once". :param time_str: A string identifying a schedule interval. (eg. 2h13m, None, @once) :return datetime.timedelta: A datetime.timedelta object or "@once" or None """ if time_str == "None": return None if time_str == "@once": return "@once" return parse_time_delta(time_str)
Remove invalid characters for dag_id
def safe_dag_id(s: str) -> str: """ Remove invalid characters for dag_id """ return re.sub("[^0-9a-zA-Z_]+", "_", s)
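A quick illustration of the character replacement (assuming safe_dag_id above is in scope):

assert safe_dag_id("my dag #2 (test)") == "my_dag_2_test_"
assert safe_dag_id("already_safe_id") == "already_safe_id"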
Chain tasks as a binary tree where task i is child of task (i - 1) // 2 : t0 -> t1 -> t3 -> t7 | \ | -> t4 -> t8 | -> t2 -> t5 -> t9 \ -> t6
def chain_as_binary_tree(*tasks: BashOperator): r""" Chain tasks as a binary tree where task i is child of task (i - 1) // 2 : t0 -> t1 -> t3 -> t7 | \ | -> t4 -> t8 | -> t2 -> t5 -> t9 \ -> t6 """ for i in range(1, len(tasks)): tasks[i].set_downstream(tasks[(i - 1) // 2])
Chain tasks as a grid: t0 -> t1 -> t2 -> t3 | | | v v v t4 -> t5 -> t6 | | v v t7 -> t8 | v t9
def chain_as_grid(*tasks: BashOperator): """ Chain tasks as a grid: t0 -> t1 -> t2 -> t3 | | | v v v t4 -> t5 -> t6 | | v v t7 -> t8 | v t9 """ if len(tasks) > 100 * 99 / 2: raise ValueError("Cannot generate grid DAGs with lateral size larger than 100 tasks.") grid_size = next(n for n in range(100) if n * (n + 1) / 2 >= len(tasks)) def index(i, j): """ Return the index of node (i, j) on the grid. """ return int(grid_size * i - i * (i - 1) / 2 + j) for i in range(grid_size - 1): for j in range(grid_size - i - 1): if index(i + 1, j) < len(tasks): tasks[index(i + 1, j)].set_downstream(tasks[index(i, j)]) if index(i, j + 1) < len(tasks): tasks[index(i, j + 1)].set_downstream(tasks[index(i, j)])
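A worked example of the triangular sizing and index() mapping above for 10 tasks:

num_tasks = 10
# The smallest n with n * (n + 1) / 2 >= 10 is 4, so 10 tasks fit on a 4-wide triangular grid.
grid_size = next(n for n in range(100) if n * (n + 1) / 2 >= num_tasks)
assert grid_size == 4


def row_start(i: int) -> int:
    # Same formula as index(i, 0): where each grid row starts in the flat task list.
    return int(grid_size * i - i * (i - 1) / 2)


# Rows begin at t0, t4, t7 and t9 - each row is one task shorter than the previous one.
assert [row_start(i) for i in range(grid_size)] == [0, 4, 7, 9]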
Chain tasks as a star (all tasks are children of task 0) t0 -> t1 | -> t2 | -> t3 | -> t4 | -> t5
def chain_as_star(*tasks: BashOperator): """ Chain tasks as a star (all tasks are children of task 0) t0 -> t1 | -> t2 | -> t3 | -> t4 | -> t5 """ tasks[0].set_upstream(list(tasks[1:]))
Print the task_id and execution date.
def print_context(_, ti, **kwargs): """ Print the task_id and execution date. """ print(f"Running {ti.task_id} {ti.execution_date}") return "Whatever you return gets printed in the logs"
Generate a list of PythonOperator tasks. The generated tasks are set up to be dependent on the `deps` argument.
def generate_parallel_tasks(name_prefix, num_of_tasks, deps): """ Generate a list of PythonOperator tasks. The generated tasks are set up to be dependent on the `deps` argument. """ tasks = [] for t_id in range(num_of_tasks): run_this = PythonOperator( task_id=f"{name_prefix}_{t_id}", python_callable=print_context, ) run_this << deps tasks.append(run_this) return tasks
Update status of the issues regarding the AIP-47 migration.
def update_issue_status( github_token: str, max_issues: int | None, dry_run: bool, repository: str, start_from: int, verbose: bool, labels: str, ): """Update status of the issues regarding the AIP-47 migration.""" g = Github(github_token) repo = g.get_repo(repository) issues = repo.get_issues(labels=labels.split(","), state="all") max_issues = max_issues if max_issues is not None else issues.totalCount total_re_added = 0 total_completed = 0 total_count_done = 0 total_count_all = 0 num_issues = 0 completed_open_issues: list[Issue.Issue] = [] completed_closed_issues: list[Issue.Issue] = [] not_completed_closed_issues: list[Issue.Issue] = [] not_completed_opened_issues: list[Issue.Issue] = [] per_issue_num_done: dict[int, int] = {} per_issue_num_all: dict[int, int] = {} for issue in issues[start_from : start_from + max_issues]: console.print(f"[blue] {issue.id}: {issue.title}") new_body, count_re_added, count_completed, count_done, count_all = process_paths_from_body( issue.body, dry_run=dry_run, verbose=verbose ) if count_all == 0: continue if count_re_added != 0 or count_completed != 0: if dry_run: print(new_body) else: issue.edit(body=new_body) console.print() console.print(f"[blue]Summary of performed actions: for {issue.title}[/]") console.print(f" Re-added file number (still there): {count_re_added}") console.print(f" Completed file number: {count_completed}") console.print(f" Done {count_done}/{count_all} = {count_done / count_all:.2%}") console.print() total_re_added += count_re_added total_completed += count_completed total_count_done += count_done total_count_all += count_all per_issue_num_all[issue.id] = count_all per_issue_num_done[issue.id] = count_done if count_done == count_all: if issue.state == "closed": completed_closed_issues.append(issue) else: completed_open_issues.append(issue) else: if issue.state == "closed": not_completed_closed_issues.append(issue) else: not_completed_opened_issues.append(issue) num_issues += 1 console.print(f"[green]Summary of ALL actions: for {num_issues} issues[/]") console.print(f" Re-added file number: {total_re_added}") console.print(f" Completed file number: {total_completed}") console.print() console.print() console.print(f"[green]Summary of ALL issues: for {num_issues} issues[/]") console.print( f" Completed and closed issues: {len(completed_closed_issues)}/{num_issues}: " f"{len(completed_closed_issues) / num_issues:.2%}" ) console.print( f" Completed files {total_count_done}/{total_count_all} = " f"{total_count_done / total_count_all:.2%}" ) console.print() if not_completed_closed_issues: console.print("[yellow] Issues that are not completed and should be opened:[/]\n") for issue in not_completed_closed_issues: all = per_issue_num_all[issue.id] done = per_issue_num_done[issue.id] console.print(f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all} : {done / all:.2%}") console.print() if completed_open_issues: console.print("[yellow] Issues that are completed and should be closed:[/]\n") for issue in completed_open_issues: console.print(rf" * [[yellow]{issue.title}[/]]({issue.html_url})") console.print() if not_completed_opened_issues: console.print("[yellow] Issues that are not completed and are still opened:[/]\n") for issue in not_completed_opened_issues: all = per_issue_num_all[issue.id] done = per_issue_num_done[issue.id] console.print(f" * [[yellow]{issue.title}[/]]({issue.html_url}): {done}/{all} : {done / all:.2%}") console.print() if completed_closed_issues: console.print("[green] Issues that are completed and are already 
closed:[/]\n") for issue in completed_closed_issues: console.print(rf" * [[green]{issue.title}[/]]({issue.html_url})") console.print() console.print()
Simple test which reproduce setup docker-compose environment and trigger example dag.
def test_trigger_dag_and_wait_for_result(default_docker_image, tmp_path_factory, monkeypatch): """Simple test which reproduce setup docker-compose environment and trigger example dag.""" tmp_dir = tmp_path_factory.mktemp("airflow-quick-start") monkeypatch.setenv("AIRFLOW_IMAGE_NAME", default_docker_image) compose_file_path = ( SOURCE_ROOT / "docs" / "apache-airflow" / "howto" / "docker-compose" / "docker-compose.yaml" ) copyfile(compose_file_path, tmp_dir / "docker-compose.yaml") # Create required directories for docker compose quick start howto for subdir in ("dags", "logs", "plugins"): (tmp_dir / subdir).mkdir() dot_env_file = tmp_dir / ".env" dot_env_file.write_text(f"AIRFLOW_UID={os.getuid()}\n") print(" .env file content ".center(72, "=")) print(dot_env_file.read_text()) compose_version = None try: compose_version = docker.compose.version() except DockerException: pytest.fail("`docker compose` not available. Make sure compose plugin is installed") try: docker_version = docker.version() except NotImplementedError: docker_version = run_command(["docker", "version"], return_output=True) compose = DockerClient(compose_project_name="quick-start", compose_project_directory=tmp_dir).compose compose.down(remove_orphans=True, volumes=True, quiet=True) try: compose.up(detach=True, wait=True, color=not os.environ.get("NO_COLOR")) api_request("PATCH", path=f"dags/{DAG_ID}", json={"is_paused": False}) api_request("POST", path=f"dags/{DAG_ID}/dagRuns", json={"dag_run_id": DAG_RUN_ID}) wait_for_terminal_dag_state(dag_id=DAG_ID, dag_run_id=DAG_RUN_ID) dag_state = api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}").get("state") assert dag_state == "success" except Exception: print("HTTP: GET health") pprint(api_request("GET", "health")) print(f"HTTP: GET dags/{DAG_ID}/dagRuns") pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns")) print(f"HTTP: GET dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances") pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances")) print(" Docker Version ".center(72, "=")) print(docker_version) print(" Docker Compose Version ".center(72, "=")) print(compose_version) print(" Compose Config ".center(72, "=")) print(json.dumps(compose.config(return_json=True), indent=4)) for service in compose.ps(all=True): print(f" Service: {service.name} ".center(72, "-")) print(" Service State ".center(72, ".")) pprint(service.state) print(" Service Config ".center(72, ".")) pprint(service.config) print(" Service Logs ".center(72, ".")) print(service.logs()) raise finally: if not os.environ.get("SKIP_DOCKER_COMPOSE_DELETION"): compose.down(remove_orphans=True, volumes=True, quiet=True) print("Docker compose instance deleted") else: print("Skipping docker-compose deletion") print() print("You can run inspect your docker-compose by running commands starting with:") quoted_command = map(shlex.quote, map(str, compose.docker_compose_cmd)) print(" ".join(quoted_command))
Use a predicate to partition entries into false entries and true entries
def partition(pred: Callable[[T], bool], iterable: Iterable[T]) -> tuple[Iterable[T], Iterable[T]]: """Use a predicate to partition entries into false entries and true entries""" iter_1, iter_2 = itertools.tee(iterable) return itertools.filterfalse(pred, iter_1), filter(pred, iter_2)
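A small usage example (assuming partition above is in scope): the first iterable holds the entries for which the predicate is false, the second those for which it is true:

odds, evens = partition(lambda x: x % 2 == 0, range(6))
assert list(odds) == [1, 3, 5]
assert list(evens) == [0, 2, 4]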