Performs single package docs build.
def perform_docs_build_for_single_package(build_specification: BuildSpecification) -> BuildDocsResult: """Performs single package docs build.""" builder = AirflowDocsBuilder(package_name=build_specification.package_name) console.print(f"[info]{build_specification.package_name:60}:[/] Building documentation") result = BuildDocsResult( package_name=build_specification.package_name, errors=builder.build_sphinx_docs( verbose=build_specification.verbose, ), log_file_name=builder.log_build_filename, ) return result
Performs single package spell check.
def perform_spell_check_for_single_package(build_specification: BuildSpecification) -> SpellCheckResult: """Performs single package spell check.""" builder = AirflowDocsBuilder(package_name=build_specification.package_name) console.print(f"[info]{build_specification.package_name:60}:[/] Checking spelling started") spelling_errors, build_errors = builder.check_spelling( verbose=build_specification.verbose, ) result = SpellCheckResult( package_name=build_specification.package_name, spelling_errors=spelling_errors, build_errors=build_errors, log_file_name=builder.log_spelling_filename, ) console.print(f"[info]{build_specification.package_name:60}:[/] Checking spelling completed") return result
Builds documentation for all packages and combines errors.
def build_docs_for_packages( packages_to_build: list[str], docs_only: bool, spellcheck_only: bool, jobs: int, verbose: bool, ) -> tuple[dict[str, list[DocBuildError]], dict[str, list[SpellingError]]]: """Builds documentation for all packages and combines errors.""" all_build_errors: dict[str, list[DocBuildError]] = defaultdict(list) all_spelling_errors: dict[str, list[SpellingError]] = defaultdict(list) with with_group("Cleaning documentation files"): for package_name in packages_to_build: console.print(f"[info]{package_name:60}:[/] Cleaning files") builder = AirflowDocsBuilder(package_name=package_name) builder.clean_files() if jobs > 1: run_in_parallel( all_build_errors=all_build_errors, all_spelling_errors=all_spelling_errors, packages_to_build=packages_to_build, docs_only=docs_only, jobs=jobs, spellcheck_only=spellcheck_only, verbose=verbose, ) else: run_sequentially( all_build_errors=all_build_errors, all_spelling_errors=all_spelling_errors, packages_to_build=packages_to_build, docs_only=docs_only, spellcheck_only=spellcheck_only, verbose=verbose, ) return all_build_errors, all_spelling_errors
Run both - spellcheck and docs build sequentially without multiprocessing
def run_sequentially( all_build_errors, all_spelling_errors, packages_to_build, docs_only, spellcheck_only, verbose, ): """Run both - spellcheck and docs build sequentially without multiprocessing""" if not spellcheck_only: for package_name in packages_to_build: build_result = perform_docs_build_for_single_package( build_specification=BuildSpecification( package_name=package_name, verbose=verbose, ) ) if build_result.errors: all_build_errors[package_name].extend(build_result.errors) print_build_output(build_result) if not docs_only: for package_name in packages_to_build: spellcheck_result = perform_spell_check_for_single_package( build_specification=BuildSpecification( package_name=package_name, verbose=verbose, ) ) if spellcheck_result.spelling_errors: all_spelling_errors[package_name].extend(spellcheck_result.spelling_errors) if spellcheck_only: all_build_errors[package_name].extend(spellcheck_result.build_errors) print_spelling_output(spellcheck_result)
Run both - spellcheck and docs build in parallel using multiprocessing
def run_in_parallel( all_build_errors: dict[str, list[DocBuildError]], all_spelling_errors: dict[str, list[SpellingError]], packages_to_build: list[str], docs_only: bool, jobs: int, spellcheck_only: bool, verbose: bool, ): """Run both - spellcheck and docs build in parallel using multiprocessing""" with multiprocessing.Pool(processes=jobs) as pool: if not spellcheck_only: run_docs_build_in_parallel( all_build_errors=all_build_errors, packages_to_build=packages_to_build, verbose=verbose, pool=pool, ) if not docs_only: run_spell_check_in_parallel( all_spelling_errors=all_spelling_errors, all_build_errors=all_build_errors, packages_to_build=packages_to_build, verbose=verbose, pool=pool, )
Prints output of docs build job.
def print_build_output(result: BuildDocsResult): """Prints output of docs build job.""" with with_group(f"{TEXT_RED}Output for documentation build {result.package_name}{TEXT_RESET}"): console.print() console.print(f"[info]{result.package_name:60}: " + "#" * 80) with open(result.log_file_name) as output: for line in output.read().splitlines(): console.print(f"{result.package_name:60} {line}") console.print(f"[info]{result.package_name:60}: " + "#" * 80)
Runs documentation building in parallel.
def run_docs_build_in_parallel( all_build_errors: dict[str, list[DocBuildError]], packages_to_build: list[str], verbose: bool, pool: Any, # Cannot use multiprocessing types here: https://github.com/python/typeshed/issues/4266 ): """Runs documentation building in parallel.""" doc_build_specifications: list[BuildSpecification] = [] with with_group("Scheduling documentation to build"): for package_name in packages_to_build: console.print(f"[info]{package_name:60}:[/] Scheduling documentation to build") doc_build_specifications.append( BuildSpecification( package_name=package_name, verbose=verbose, ) ) with with_group("Running docs building"): console.print() result_list = pool.map(perform_docs_build_for_single_package, doc_build_specifications) for result in result_list: if result.errors: all_build_errors[result.package_name].extend(result.errors) print_build_output(result)
Prints output of spell check job.
def print_spelling_output(result: SpellCheckResult): """Prints output of spell check job.""" with with_group(f"{TEXT_RED}Output for spelling check: {result.package_name}{TEXT_RESET}"): console.print() console.print(f"[info]{result.package_name:60}: " + "#" * 80) with open(result.log_file_name) as output: for line in output.read().splitlines(): console.print(f"{result.package_name:60} {line}") console.print(f"[info]{result.package_name:60}: " + "#" * 80) console.print()
Runs spell check in parallel.
def run_spell_check_in_parallel( all_spelling_errors: dict[str, list[SpellingError]], all_build_errors: dict[str, list[DocBuildError]], packages_to_build: list[str], verbose: bool, pool, ): """Runs spell check in parallel.""" spell_check_specifications: list[BuildSpecification] = [] with with_group("Scheduling spell checking of documentation"): for package_name in packages_to_build: console.print(f"[info]{package_name:60}:[/] Scheduling spellchecking") spell_check_specifications.append(BuildSpecification(package_name=package_name, verbose=verbose)) with with_group("Running spell checking of documentation"): console.print() result_list = pool.map(perform_spell_check_for_single_package, spell_check_specifications) for result in result_list: if result.spelling_errors: all_spelling_errors[result.package_name].extend(result.spelling_errors) all_build_errors[result.package_name].extend(result.build_errors) print_spelling_output(result)
Displays a summary that contains information on the number of errors in each package
def display_packages_summary( build_errors: dict[str, list[DocBuildError]], spelling_errors: dict[str, list[SpellingError]] ): """Displays a summary that contains information on the number of errors in each packages""" packages_names = {*build_errors.keys(), *spelling_errors.keys()} tabular_data = [ { "Package name": f"[info]{package_name}[/]", "Count of doc build errors": len(build_errors.get(package_name, [])), "Count of spelling errors": len(spelling_errors.get(package_name, [])), } for package_name in sorted(packages_names, key=lambda k: k or "") ] console.print("#" * 20, " Packages errors summary ", "#" * 20) console.print(tabulate(tabular_data=tabular_data, headers="keys")) console.print("#" * 50)
Prints build errors and exits.
def print_build_errors_and_exit( build_errors: dict[str, list[DocBuildError]], spelling_errors: dict[str, list[SpellingError]], spellcheck_only: bool, ) -> None: """Prints build errors and exits.""" if build_errors or spelling_errors: if build_errors: if spellcheck_only: console.print("[warning]There were some build errors remaining.") console.print() else: display_errors_summary(build_errors) console.print() if spelling_errors: display_spelling_error_summary(spelling_errors) console.print() console.print("The documentation has errors.") display_packages_summary(build_errors, spelling_errors) console.print() console.print(CHANNEL_INVITATION) sys.exit(1) else: console.print("[green]Documentation build is successful[/]")
Main code
def main(): """Main code""" args = _get_parser().parse_args() available_packages = get_available_packages() docs_only = args.docs_only spellcheck_only = args.spellcheck_only disable_provider_checks = args.disable_provider_checks disable_checks = args.disable_checks package_filters = args.package_filter with with_group("Available packages"): for pkg in sorted(available_packages): console.print(f" - {pkg}") if package_filters: console.print("Current package filters: ", package_filters) packages_to_build = process_package_filters(available_packages, package_filters) with with_group("Fetching inventories"): # Inventories that could not be retrieved should be built first. This may mean this is a # new package. packages_without_inventories = fetch_inventories() normal_packages, priority_packages = partition( lambda d: d in packages_without_inventories, packages_to_build ) normal_packages, priority_packages = list(normal_packages), list(priority_packages) jobs = args.jobs if args.jobs != 0 else os.cpu_count() with with_group( f"Documentation will be built for {len(packages_to_build)} package(s) with {jobs} parallel jobs" ): for pkg_no, pkg in enumerate(packages_to_build, start=1): console.print(f"{pkg_no}. {pkg}") all_build_errors: dict[str | None, list[DocBuildError]] = {} all_spelling_errors: dict[str | None, list[SpellingError]] = {} if priority_packages: # Build priority packages package_build_errors, package_spelling_errors = build_docs_for_packages( packages_to_build=priority_packages, docs_only=docs_only, spellcheck_only=spellcheck_only, jobs=jobs, verbose=args.verbose, ) if package_build_errors: all_build_errors.update(package_build_errors) if package_spelling_errors: all_spelling_errors.update(package_spelling_errors) # Build normal packages # If only one inventory is missing, the remaining packages are correct. If we are missing # two or more inventories, it is better to try to build for all packages as the previous packages # may have failed as well. package_build_errors, package_spelling_errors = build_docs_for_packages( packages_to_build=packages_to_build if len(priority_packages) > 1 else normal_packages, docs_only=docs_only, spellcheck_only=spellcheck_only, jobs=jobs, verbose=args.verbose, ) if package_build_errors: all_build_errors.update(package_build_errors) if package_spelling_errors: all_spelling_errors.update(package_spelling_errors) if not args.one_pass_only: # Build documentation for some packages again if it can help them. package_build_errors = retry_building_docs_if_needed( all_build_errors=all_build_errors, all_spelling_errors=all_spelling_errors, args=args, docs_only=docs_only, jobs=jobs, package_build_errors=package_build_errors, originally_built_packages=packages_to_build, # If spellchecking fails, we need to rebuild all packages first in case some references # are broken between packages rebuild_all_packages=spellcheck_only, ) # And try again in case one change spans across three-level dependencies. 
package_build_errors = retry_building_docs_if_needed( all_build_errors=all_build_errors, all_spelling_errors=all_spelling_errors, args=args, docs_only=docs_only, jobs=jobs, package_build_errors=package_build_errors, originally_built_packages=packages_to_build, # In the 3rd pass we only rebuild packages that failed in the 2nd pass # no matter if we do spellcheck-only build rebuild_all_packages=False, ) if spellcheck_only: # And in case of spellcheck-only, we add a 4th pass to account for A->B-C case # For spellcheck-only build, the first pass does not solve any of the dependency # Issues, they only start getting solved and the 2nd pass so we might need to do one more pass package_build_errors = retry_building_docs_if_needed( all_build_errors=all_build_errors, all_spelling_errors=all_spelling_errors, args=args, docs_only=docs_only, jobs=jobs, package_build_errors=package_build_errors, originally_built_packages=packages_to_build, # In the 4th pass we only rebuild packages that failed in the 3rd pass # no matter if we do spellcheck-only build rebuild_all_packages=False, ) if not disable_checks: general_errors = lint_checks.run_all_check(disable_provider_checks=disable_provider_checks) if general_errors: all_build_errors[None] = general_errors dev_index_generator.generate_index(f"{DOCS_DIR}/_build/index.html") if not package_filters: _promote_new_flags() if os.path.exists(PROVIDER_INIT_FILE): os.remove(PROVIDER_INIT_FILE) print_build_errors_and_exit( all_build_errors, all_spelling_errors, spellcheck_only, )
Sets the plugin up
def setup(app: Sphinx): """Sets the plugin up""" app.connect("config-inited", _create_init_py) return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Gets template fields for a specific operator class. :param env: env config :param fullname: Full path to operator class. For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator`` :return: List of template fields
def get_template_field(env, fullname) -> list[str]: """ Gets template fields for specific operator class. :param env: env config :param fullname: Full path to operator class. For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator`` :return: List of template field """ modname, classname = fullname.rsplit(".", 1) try: with mock(env.config.autodoc_mock_imports): mod = import_module(modname) except ImportError: raise RoleException(f"Error loading {modname} module.") clazz = getattr(mod, classname) if not clazz: raise RoleException(f"Error finding {classname} class in {modname} module.") template_fields = getattr(clazz, "template_fields") if not template_fields: raise RoleException(f"Could not find the template fields for {classname} class in {modname} module.") return list(template_fields)
A role that allows you to include a list of template fields in the middle of the text. This is especially useful when writing guides describing how to use the operator. The result is a list of fields where each field is rendered as an inline literal. Sample usage:: :template-fields:`airflow.operators.bash.BashOperator` For further information look at: * Creating reStructuredText Interpreted Text Roles: http://docutils.sourceforge.net/docs/howto/rst-roles.html
def template_field_role( app, typ, rawtext, text, lineno, inliner, options=None, content=None, ): """ A role that allows you to include a list of template fields in the middle of the text. This is especially useful when writing guides describing how to use the operator. The result is a list of fields where each field is shorted in the literal block. Sample usage:: :template-fields:`airflow.operators.bash.BashOperator` For further information look at: * [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted Text Roles) """ if options is None: options = {} if content is None: content = [] text = utils.unescape(text) try: template_fields = get_template_field(app.env, text) except RoleException as e: msg = inliner.reporter.error( f"invalid class name {text} \n{e}", line=lineno, ) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] node = nodes.inline(rawtext=rawtext) for i, field in enumerate(template_fields): if i != 0: node += nodes.Text(", ") node += nodes.literal(field, "", nodes.Text(field)) return [node], []
Sets the extension up
def setup(app): """Sets the extension up""" from docutils.parsers.rst import roles roles.register_local_role("template-fields", partial(template_field_role, app)) return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Registers source code. :param app: application :param env: environment of the plugin :param modname: name of the module to load :return: True if the code is registered successfully, False otherwise
def register_source(app, env, modname): """ Registers source code. :param app: application :param env: environment of the plugin :param modname: name of the module to load :return: True if the code is registered successfully, False otherwise """ entry = env._viewcode_modules.get(modname, None) if entry is False: print(f"[{modname}] Entry is false for ") return False code_tags = app.emit_firstresult("viewcode-find-source", modname) if code_tags is None: try: analyzer = ModuleAnalyzer.for_module(modname) except Exception as ex: logger.info( 'Module "%s" could not be loaded. Full source will not be available. "%s"', modname, ex ) # We cannot use regular warnings or exception methods because those warnings are interpreted # by running python process and converted into "real" warnings, so we need to print the # traceback here at info level tb = traceback.format_exc() logger.info("%s", tb) env._viewcode_modules[modname] = False return False if not isinstance(analyzer.code, str): code = analyzer.code.decode(analyzer.encoding) else: code = analyzer.code analyzer.find_tags() tags = analyzer.tags else: code, tags = code_tags if entry is None or entry[0] != code: entry = code, tags, {}, "" env._viewcode_modules[modname] = entry return True
Creates documentation node for example include. :param env: environment of the documentation :param relative_path: path of the code :param show_button: whether to show "view code" button :return paragraph with the node
def create_node(env, relative_path, show_button): """ Creates documentation node for example include. :param env: environment of the documentation :param relative_path: path of the code :param show_button: whether to show "view code" button :return paragraph with the node """ pagename = "_modules/" + relative_path[:-3] header_classes = ["example-header"] if show_button: header_classes += ["example-header--with-button"] paragraph = nodes.paragraph(relative_path, classes=header_classes) paragraph += nodes.inline("", relative_path, classes=["example-title"]) if show_button: pending_ref = viewcode_anchor( reftarget=pagename, refid="", refdoc=env.docname, classes=["example-header-button viewcode-button"], ) pending_ref += nodes.inline("", _("View Source")) paragraph += pending_ref return paragraph
Reads the documentation tree for the application and registers sources in the generated documentation. :param app: application :param doctree: documentation tree :return None
def doctree_read(app, doctree): """ Reads documentation tree for the application and register sources in the generated documentation. :param app: application :param doctree: documentation tree :return None """ env = app.builder.env if not hasattr(env, "_viewcode_modules"): env._viewcode_modules = {} if app.builder.name == "singlehtml": return for objnode in doctree.traverse(ExampleHeader): filepath = objnode.get("filename") relative_path = os.path.relpath( filepath, os.path.commonprefix([app.config.exampleinclude_sourceroot, filepath]) ) modname = relative_path.replace("/", ".")[:-3] show_button = register_source(app, env, modname) onlynode = create_node(env, relative_path, show_button) objnode.replace_self(onlynode)
Sets the plugin up and returns configuration of the plugin. :param app: application. :return json description of the configuration that is needed by the plugin.
def setup(app): """ Sets the plugin up and returns configuration of the plugin. :param app: application. :return json description of the configuration that is needed by the plugin. """ directives.register_directive("exampleinclude", ExampleInclude) app.connect("doctree-read", doctree_read) app.add_config_value("exampleinclude_sourceroot", None, "env") if not airflow_theme_is_available: # Sphinx airflow theme has its own styles. app.add_css_file("exampleinclude.css") return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Sphinx "build-finished" event handler.
def build_postprocess(app, exception): """Sphinx "build-finished" event handler.""" from sphinx.builders import html as builders if exception or not isinstance(app.builder, builders.StandaloneHTMLBuilder): return global_substitutions = app.config.global_substitutions # Replace `|version|` in the docker-compose.yaml that requires manual substitutions for path in app.config.html_extra_with_substitutions: with open(path) as file: with open(os.path.join(app.outdir, os.path.basename(path)), "w") as output_file: for line in file: output_file.write(_manual_substitution(line, global_substitutions)) # Replace `|version|` in the installation files that requires manual substitutions (in links) for path in app.config.manual_substitutions_in_generated_html: with open(os.path.join(app.outdir, os.path.dirname(path), os.path.basename(path))) as input_file: content = input_file.readlines() with open( os.path.join(app.outdir, os.path.dirname(path), os.path.basename(path)), "w" ) as output_file: for line in content: output_file.write(_manual_substitution(line, global_substitutions))
Setup plugin
def setup(app): """Setup plugin""" app.connect("build-finished", build_postprocess) app.add_config_value("html_extra_with_substitutions", [], "html", [str]) app.add_config_value("manual_substitutions_in_generated_html", [], "html", [str]) app.add_config_value("global_substitutions", {}, "html", [dict]) return { "parallel_write_safe": True, "parallel_read_safe": True, }
Sphinx "build-finished" event handler.
def fix_provider_references(app, exception): """Sphinx "build-finished" event handler.""" from sphinx.builders import html as builders if exception or not isinstance(app.builder, builders.StandaloneHTMLBuilder): return # Replace `|version|` in the files that require manual substitution for path in Path(app.outdir).rglob("*.html"): if path.exists(): lines = path.read_text().splitlines(True) with path.open("w") as output_file: for line in lines: output_file.write(line.replace("|version|", app.config.version))
Setup plugin
def setup(app): """Setup plugin""" app.connect("build-finished", fix_provider_references) return { "parallel_write_safe": True, "parallel_read_safe": True, }
Setup plugin
def setup(app): """Setup plugin""" app.add_directive("operators-hooks-ref", OperatorsHooksReferenceDirective) app.add_directive("transfers-ref", TransfersReferenceDirective) app.add_directive("airflow-logging", LoggingDirective) app.add_directive("airflow-auth-backends", AuthBackendDirective) app.add_directive("airflow-configurations", AuthConfigurations) app.add_directive("airflow-secrets-backends", SecretsBackendDirective) app.add_directive("airflow-connections", ConnectionsDirective) app.add_directive("airflow-extra-links", ExtraLinksDirective) app.add_directive("airflow-notifications", NotificationsDirective) app.add_directive("airflow-executors", ExecutorsDirective) app.add_directive("airflow-deferrable-operators", DeferrableOperatorDirective) app.add_directive("airflow-deprecations", DeprecationsDirective) app.add_directive("airflow-dataset-schemes", DatasetSchemeDirective) return {"parallel_read_safe": True, "parallel_write_safe": True}
Render tables with integrations
def cli(): """Render tables with integrations"""
Renders Operators and Hooks content
def operators_and_hooks(tag: Iterable[str], header_separator: str): """Renders Operators and Hooks content""" print(_render_operator_content(tags=set(tag) if tag else None, header_separator=header_separator))
Renders Transfers content
def transfers(tag: Iterable[str], header_separator: str): """Renders Transfers content""" print(_render_transfer_content(tags=set(tag) if tag else None, header_separator=header_separator))
Renders Logger content
def logging(header_separator: str): """Renders Logger content""" print( _common_render_list_content( header_separator=header_separator, resource_type="logging", template="logging.rst.jinja2" ) )
Renders Auth backends content
def auth_backends(header_separator: str): """Renders Auth backends content""" print( _common_render_list_content( header_separator=header_separator, resource_type="auth-backends", template="auth_backend.rst.jinja2", ) )
Renders Secret Backends content
def secret_backends(header_separator: str): """Renders Secret Backends content""" print( _common_render_list_content( header_separator=header_separator, resource_type="secrets-backends", template="secret_backend.rst.jinja2", ) )
Renders Connections content
def connections(header_separator: str): """Renders Connections content""" print( _common_render_list_content( header_separator=header_separator, resource_type="connection-types", template="connections.rst.jinja2", ) )
Renders Extra links content
def extra_links(header_separator: str): """Renders Extra links content""" print( _common_render_list_content( header_separator=header_separator, resource_type="extra-links", template="extra_links.rst.jinja2" ) )
Renders Deferrable Operators content
def deferrable_operators(tag: Iterable[str], header_separator: str): """Renders Deferrable Operators content""" print(_render_deferrable_operator_content(header_separator=header_separator))
Extracts classes and their information from a Python module file. The function parses the specified module file and registers all classes. The registry for each class includes the module filename, methods, base classes and any additional class extras provided. :param module_filepath: The file path of the module. :param class_extras: Additional information to include in each class's registry. :return: A dictionary with class names as keys and their corresponding information.
def _get_module_class_registry( module_filepath: str, class_extras: dict[str, Any] ) -> dict[str, dict[str, Any]]: """Extracts classes and its information from a Python module file. The function parses the specified module file and registers all classes. The registry for each class includes the module filename, methods, base classes and any additional class extras provided. :param module_filepath: The file path of the module. :param class_extras: Additional information to include in each class's registry. :return: A dictionary with class names as keys and their corresponding information. """ with open(module_filepath) as file: ast_obj = ast.parse(file.read()) module_class_registry = { node.name: { "module_filepath": module_filepath, "methods": {n.name for n in ast.walk(node) if isinstance(n, ast.FunctionDef)}, "base_classes": [b.id for b in node.bases if isinstance(b, ast.Name)], **class_extras, } for node in ast_obj.body if isinstance(node, ast.ClassDef) } return module_class_registry
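To make the resulting registry shape concrete, here is a minimal sketch (the module source, class names and method names are invented for illustration) that applies the same ast-based extraction to an in-memory string instead of a module file:

import ast

source = """
class BaseHook:
    def get_conn(self):
        ...

class MyHook(BaseHook):
    def run(self):
        ...
"""

tree = ast.parse(source)
registry = {
    node.name: {
        # Same extraction as above: method names and direct base-class names per class.
        "methods": {n.name for n in ast.walk(node) if isinstance(n, ast.FunctionDef)},
        "base_classes": [b.id for b in node.bases if isinstance(b, ast.Name)],
    }
    for node in tree.body
    if isinstance(node, ast.ClassDef)
}
print(registry["MyHook"])  # {'methods': {'run'}, 'base_classes': ['BaseHook']}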
Determines if a class or its bases in the registry have any of the specified methods. :param class_name: The name of the class to check. :param method_names: A list of names of methods to search for. :param class_registry: A dictionary representing the class registry, where each key is a class name and the value is its metadata. :return: True if any of the specified methods are found in the class or its base classes; False otherwise. Example: >>> example_class_registry = { ... "MyClass": {"methods": {"foo", "bar"}, "base_classes": ["BaseClass"]}, ... "BaseClass": {"methods": {"base_foo"}, "base_classes": []}, ... } >>> _has_method("MyClass", ["foo"], example_class_registry) True >>> _has_method("MyClass", ["base_foo"], example_class_registry) True >>> _has_method("MyClass", ["not_a_method"], example_class_registry) False
def _has_method( class_name: str, method_names: Iterable[str], class_registry: dict[str, dict[str, Any]] ) -> bool: """Determines if a class or its bases in the registry have any of the specified methods. :param class_name: The name of the class to check. :param method_names: A list of names of methods to search for. :param class_registry: A dictionary representing the class registry, where each key is a class name and the value is its metadata. :return: True if any of the specified methods are found in the class or its base classes; False otherwise. Example: >>> example_class_registry = { ... "MyClass": {"methods": {"foo", "bar"}, "base_classes": ["BaseClass"]}, ... "BaseClass": {"methods": {"base_foo"}, "base_classes": []}, ... } >>> _has_method("MyClass", ["foo"], example_class_registry) True >>> _has_method("MyClass", ["base_foo"], example_class_registry) True >>> _has_method("MyClass", ["not_a_method"], example_class_registry) False """ if class_name in class_registry: if any(method in class_registry[class_name]["methods"] for method in method_names): return True for base_name in class_registry[class_name]["base_classes"]: if _has_method(base_name, method_names, class_registry): return True return False
Builds a registry of classes from YAML configuration files. This function scans through YAML configuration files to build a registry of classes. It parses each YAML file to get the provider's name and registers classes from Python module files within the provider's directory, excluding '__init__.py'. :return: A dictionary with provider names as keys and a dictionary of classes as values.
def _get_providers_class_registry() -> dict[str, dict[str, Any]]: """Builds a registry of classes from YAML configuration files. This function scans through YAML configuration files to build a registry of classes. It parses each YAML file to get the provider's name and registers classes from Python module files within the provider's directory, excluding '__init__.py'. :return: A dictionary with provider names as keys and a dictionary of classes as values. """ class_registry = {} for provider_yaml_path in get_provider_yaml_paths(): provider_yaml_content = yaml.safe_load(Path(provider_yaml_path).read_text()) for root, _, file_names in os.walk(Path(provider_yaml_path).parent): for file_name in file_names: module_filepath = f"{os.path.relpath(root)}/{file_name}" if not module_filepath.endswith(".py") or module_filepath == "__init__.py": continue module_registry = _get_module_class_registry( module_filepath=module_filepath, class_extras={"provider_name": provider_yaml_content["package-name"]}, ) class_registry.update(module_registry) return class_registry
Setup plugin
def setup(app): """Setup plugin""" app.add_directive("airflow-providers-openlineage-supported-classes", OpenLineageSupportedClassesDirective) return {"parallel_read_safe": True, "parallel_write_safe": True}
Setup plugin
def setup(app: Sphinx): """Setup plugin""" app.setup_extension("sphinx_jinja") app.connect("config-inited", _on_config_inited) app.add_crossref_type( directivename="provider", rolename="provider", ) return {"parallel_read_safe": True, "parallel_write_safe": True}
Sets the plugin up and returns configuration of the plugin. :param app: application. :return json description of the configuration that is needed by the plugin.
def setup(app: Sphinx): """ Sets the plugin up and returns configuration of the plugin. :param app: application. :return json description of the configuration that is needed by the plugin. """ app.connect("config-inited", _create_init_py) return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Returns list of provider.yaml files
def get_provider_yaml_paths(): """Returns list of provider.yaml files""" return sorted(glob(f"{ROOT_DIR}/airflow/providers/**/provider.yaml", recursive=True))
Load all data from providers files :return: A list containing the contents of all provider.yaml files.
def load_package_data(include_suspended: bool = False) -> list[dict[str, Any]]: """ Load all data from providers files :return: A list containing the contents of all provider.yaml files. """ schema = _load_schema() result = [] for provider_yaml_path in get_provider_yaml_paths(): with open(provider_yaml_path) as yaml_file: provider = yaml.safe_load(yaml_file) try: jsonschema.validate(provider, schema=schema) except jsonschema.ValidationError as ex: msg = f"Unable to parse: {provider_yaml_path}. Original error {type(ex).__name__}: {ex}" raise RuntimeError(msg) if provider["state"] == "suspended" and not include_suspended: continue provider_yaml_dir = os.path.dirname(provider_yaml_path) provider["python-module"] = _filepath_to_module(provider_yaml_dir) provider["package-dir"] = provider_yaml_dir provider["system-tests-dir"] = _filepath_to_system_tests(provider_yaml_dir) result.append(provider) return result
Generate redirects files.
def generate_redirects(app): """Generate redirects files.""" redirect_file_path = os.path.join(app.srcdir, app.config.redirects_file) if not os.path.exists(redirect_file_path): log.info("Could not found the redirect file: %s", redirect_file_path) return in_suffix = next(iter(app.config.source_suffix.keys())) if not isinstance(app.builder, builders.StandaloneHTMLBuilder): return with open(redirect_file_path) as redirects: for line in redirects.readlines(): # Skip empty line if not line.strip(): continue # Skip comments if line.startswith("#"): continue # Split line into the original path `from_path` and where the URL should redirect to `to_path` from_path, _, to_path = line.rstrip().partition(" ") log.debug("Redirecting '%s' to '%s'", from_path, to_path) # in_suffix is often ".rst" from_path = from_path.replace(in_suffix, ".html") to_path = to_path.replace(in_suffix, ".html") to_path_prefix = f"..{os.path.sep}" * (len(from_path.split(os.path.sep)) - 1) # The redirect path needs to move back to the root of the apache-airflow docs directory # or the root of the docs directory altogether for provider packages. if "../" and "providers" in to_path: to_path_prefix = f"..{os.path.sep}" * (len(from_path.split(os.path.sep))) else: to_path_prefix = f"..{os.path.sep}" * (len(from_path.split(os.path.sep)) - 1) to_path = to_path_prefix + to_path log.debug("Resolved redirect '%s' to '%s'", from_path, to_path) # This will be used to save an HTML file with `TEMPLATE` formatted redirected_filename = os.path.join(app.builder.outdir, from_path) redirected_directory = os.path.dirname(redirected_filename) os.makedirs(redirected_directory, exist_ok=True) with open(redirected_filename, "w") as f: f.write(TEMPLATE.format(to_path))
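As a worked example of the path arithmetic above, the sketch below resolves a single hypothetical redirects entry; it assumes a POSIX path separator and the usual ".rst" source suffix:

import os

# One hypothetical line from the redirects file: "<from_path> <to_path>".
line = "howto/secure-connections.rst howto/connection/index.rst"
in_suffix = ".rst"

from_path, _, to_path = line.rstrip().partition(" ")
from_path = from_path.replace(in_suffix, ".html")  # page that will be generated
to_path = to_path.replace(in_suffix, ".html")      # page it should point to

# The redirect page lives at <outdir>/howto/secure-connections.html, so the link must
# climb one directory per path component before the file name.
to_path_prefix = f"..{os.path.sep}" * (len(from_path.split(os.path.sep)) - 1)
print(to_path_prefix + to_path)  # ../howto/connection/index.html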
Setup plugin
def setup(app): """Setup plugin""" app.add_config_value("redirects_file", "redirects", "env") app.connect("builder-inited", generate_redirects) return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Sets the transform up
def setup(app): """Sets the transform up""" app.add_post_transform(TrimDocMarkerFlagsTransform) return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
Return full path to the user-specific cache dir for this application
def _user_cache_dir(appname=None): """Return full path to the user-specific cache dir for this application""" if sys.platform == "win32": # Windows has a complex procedure for determining the per-user application data directory because its # location can be changed in the Windows registry, so a temporary directory is used for the cache path = os.path.join(tempfile.gettempdir(), appname) elif sys.platform == "darwin": path = os.path.expanduser("~/Library/Caches") else: path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) path = os.path.join(path, appname) return path
Fetch a URL to the local cache and return the path.
def fetch_and_cache(script_url: str, output_filename: str): """Fetch a URL to the local cache and return the path.""" cache_key = _gethash(script_url) cache_dir = _user_cache_dir("redoc-doc") cache_metadata_filepath = os.path.join(cache_dir, "cache-metadata.json") cache_filepath = os.path.join(cache_dir, f"{cache_key}-{output_filename}") # Create cache directory os.makedirs(cache_dir, exist_ok=True) # Load cache metadata cache_metadata: dict[str, str] = {} if os.path.exists(cache_metadata_filepath): try: with open(cache_metadata_filepath) as cache_file: cache_metadata = json.load(cache_file) except json.JSONDecodeError: os.remove(cache_metadata_filepath) etag = cache_metadata.get(cache_key) # If we have a file and etag, check the fast path if os.path.exists(cache_filepath) and etag: res = requests.get(script_url, headers={"If-None-Match": etag}) if res.status_code == 304: return cache_filepath # Slow path res = requests.get(script_url) res.raise_for_status() with open(cache_filepath, "wb") as output_file: output_file.write(res.content) # Save cache metadata, if needed etag = res.headers.get("etag", None) if etag: cache_metadata[cache_key] = etag with open(cache_metadata_filepath, "w") as cache_file: json.dump(cache_metadata, cache_file) return cache_filepath
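The caching strategy above is plain HTTP ETag revalidation; the following sketch (with a placeholder URL) shows the conditional-GET handshake in isolation:

import requests

url = "https://example.com/redoc.standalone.js"  # placeholder URL

first = requests.get(url)
first.raise_for_status()
etag = first.headers.get("etag")

if etag:
    # Send the stored ETag back; a 304 response means the cached copy is still current.
    later = requests.get(url, headers={"If-None-Match": etag})
    if later.status_code == 304:
        print("cache hit - reuse the file already on disk")
    else:
        print("cache miss - overwrite the cached file with later.content")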
Sphinx "builder-inited" event handler.
def builder_inited(app): """Sphinx "builder-inited" event handler.""" script_url = app.config.redoc_script_url output_filename = "script.js" fetch_and_cache(script_url, output_filename)
Sphinx "build-finished" event handler.
def build_finished(app, exception): """Sphinx "build-finished" event handler.""" if exception or not isinstance(app.builder, builders.StandaloneHTMLBuilder): return script_url = app.config.redoc_script_url output_filename = "script.js" cache_filepath = fetch_and_cache(script_url, output_filename) _copy_file(cache_filepath, os.path.join(app.builder.outdir, "_static", "redoc.js"))
Setup plugin
def setup(app): """Setup plugin""" app.add_config_value("redoc_script_url", None, "env") app.connect("builder-inited", builder_inited) app.connect("build-finished", build_finished) return {"parallel_read_safe": True, "parallel_write_safe": True}
Decorate an inline code so that SubstitutionCodeBlockTransform will notice it
def substitution_code_role(*args, **kwargs) -> tuple[list, list[SystemMessage]]: """Decorate an inline code so that SubstitutionCodeBlockTransform will notice it""" [node], system_messages = code_role(*args, **kwargs) node[_SUBSTITUTION_OPTION_NAME] = True # type: ignore[index] return [node], system_messages
Setup plugin
def setup(app: Sphinx) -> dict: """Setup plugin""" app.add_config_value("substitutions", [], "html") directives.register_directive("code-block", SubstitutionCodeBlock) app.add_role("subst-code", substitution_code_role) app.add_post_transform(SubstitutionCodeBlockTransform) app.add_post_transform(AddSpacepadSubstReference) return {"parallel_write_safe": True, "parallel_read_safe": True}
Prepares code snippet. :param file_path: file path :param line_no: line number :param context_lines_count: number of lines of context.
def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str: """Prepares code snippet. :param file_path: file path :param line_no: line number :param context_lines_count: number of lines of context. """ def guess_lexer_for_filename(filename): from pygments.lexers import get_lexer_for_filename from pygments.util import ClassNotFound try: lexer = get_lexer_for_filename(filename) except ClassNotFound: from pygments.lexers.special import TextLexer lexer = TextLexer() return lexer with open(file_path) as text_file: # Highlight code code = text_file.read() with suppress(ImportError): import pygments from pygments.formatters.terminal import TerminalFormatter code = pygments.highlight( code=code, formatter=TerminalFormatter(), lexer=guess_lexer_for_filename(file_path) ) code_lines = code.splitlines() # Prepend line number code_lines = [f"{line_no:4} | {line}" for line_no, line in enumerate(code_lines, 1)] # # Cut out the snippet start_line_no = max(0, line_no - context_lines_count) end_line_no = line_no + context_lines_count code_lines = code_lines[start_line_no:end_line_no] # Join lines code = "\n".join(code_lines) return code
Formats path nicely.
def pretty_format_path(path: str, start: str) -> str: """Formats path nicely.""" relpath = os.path.relpath(path, start) if relpath == path: return path return f"{start}/{relpath}"
Generates an index for development documentation. :param out_file: The path where the index should be stored
def generate_index(out_file: str) -> None: """ Generates an index for development documentation. :param out_file: The path where the index should be stored """ content = _render_content() with open(out_file, "w") as output_file: output_file.write(content)
Get list of all available providers packages to build.
def get_available_providers_packages(include_suspended: bool = False): """Get list of all available providers packages to build.""" return [ provider["package-name"] for provider in (ALL_PROVIDER_YAMLS_WITH_SUSPENDED if include_suspended else ALL_PROVIDER_YAMLS) ]
Get list of all available packages to build.
def get_available_packages(include_suspended: bool = False): """Get list of all available packages to build.""" provider_package_names = get_available_providers_packages(include_suspended=include_suspended) return [ "apache-airflow", *provider_package_names, "apache-airflow-providers", "helm-chart", "docker-stack", ]
Displays summary of errors
def display_errors_summary(build_errors: dict[str, list[DocBuildError]]) -> None: """Displays summary of errors""" console.print() console.print("[red]" + "#" * 30 + " Start docs build errors summary " + "#" * 30 + "[/]") console.print() for package_name, errors in build_errors.items(): if package_name: console.print("=" * 30 + f" [info]{package_name}[/] " + "=" * 30) else: console.print("=" * 30, " [info]General[/] ", "=" * 30) for warning_no, error in enumerate(sorted(errors), 1): console.print("-" * 30, f"[red]Error {warning_no:3}[/]", "-" * 20) console.print(error.message) console.print() if error.file_path and not error.file_path.endswith("<unknown>") and error.line_no: console.print( f"File path: {os.path.relpath(error.file_path, start=DOCS_DIR)} ({error.line_no})" ) if os.path.isfile(error.file_path): console.print() console.print(prepare_code_snippet(error.file_path, error.line_no)) elif error.file_path: console.print(f"File path: {error.file_path}") console.print() console.print("[red]" + "#" * 30 + " End docs build errors summary " + "#" * 30 + "[/]") console.print()
Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of DocBuildErrors.
def parse_sphinx_warnings(warning_text: str, docs_dir: str) -> list[DocBuildError]: """ Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of DocBuildErrors. """ sphinx_build_errors = [] for sphinx_warning in warning_text.splitlines(): if not sphinx_warning: continue warning_parts = sphinx_warning.split(":", 2) if len(warning_parts) == 3: try: sphinx_build_errors.append( DocBuildError( file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]), message=warning_parts[2], ) ) except Exception: # If an exception occurred while parsing the warning message, display the raw warning message. sphinx_build_errors.append( DocBuildError(file_path=None, line_no=None, message=sphinx_warning) ) else: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) return sphinx_build_errors
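The parser above relies on the standard "file:line: message" layout of Sphinx warnings; a fabricated example shows how the three-way split maps onto the DocBuildError fields:

# Fabricated warning line for illustration only.
warning = "apache-airflow/concepts/dags.rst:42: WARNING: undefined label: dag-run"

file_part, line_part, message = warning.split(":", 2)
print(file_part)       # apache-airflow/concepts/dags.rst   -> file_path (joined with docs_dir)
print(int(line_part))  # 42                                  -> line_no
print(message)         # " WARNING: undefined label: dag-run" -> message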
Download a file, validate the Sphinx Inventory header and return status information as a tuple of package name and success status (bool value).
def _fetch_file(session: requests.Session, package_name: str, url: str, path: str) -> tuple[str, bool]: """ Download a file, validate Sphinx Inventory headers and returns status information as a tuple with package name and success status(bool value). """ try: response = session.get(url, allow_redirects=True, stream=True) except (requests.RequestException, urllib3.exceptions.HTTPError): print(f"{package_name}: Failed to fetch inventory: {url}") traceback.print_exc(file=sys.stderr) return package_name, False if not response.ok: print(f"{package_name}: Failed to fetch inventory: {url}") print(f"{package_name}: Failed with status: {response.status_code}", file=sys.stderr) return package_name, False if response.url != url: print(f"{package_name}: {url} redirected to {response.url}") with NamedTemporaryFile(suffix=package_name, mode="wb+") as tf: for chunk in response.iter_content(chunk_size=4096): tf.write(chunk) tf.flush() tf.seek(0, 0) line = InventoryFileReader(tf).readline() if not line.startswith("# Sphinx inventory version"): print(f"{package_name}: Response contain unexpected Sphinx Inventory header: {line!r}.") return package_name, False tf.seek(0, 0) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, "wb") as f: shutil.copyfileobj(tf, f) print(f"{package_name}: Fetched inventory: {response.url}") return package_name, True
Fetch all inventories for Airflow documentation packages and store in cache.
def fetch_inventories(): """Fetch all inventories for Airflow documentation packages and store in cache.""" os.makedirs(os.path.dirname(CACHE_DIR), exist_ok=True) to_download: list[tuple[str, str, str]] = [] for pkg_name in get_available_providers_packages(): to_download.append( ( pkg_name, S3_DOC_URL_VERSIONED.format(package_name=pkg_name), f"{CACHE_DIR}/{pkg_name}/objects.inv", ) ) for pkg_name in ["apache-airflow", "helm-chart"]: to_download.append( ( pkg_name, S3_DOC_URL_VERSIONED.format(package_name=pkg_name), f"{CACHE_DIR}/{pkg_name}/objects.inv", ) ) for pkg_name in ["apache-airflow-providers", "docker-stack"]: to_download.append( ( pkg_name, S3_DOC_URL_NON_VERSIONED.format(package_name=pkg_name), f"{CACHE_DIR}/{pkg_name}/objects.inv", ) ) to_download.extend( ( pkg_name, f"{doc_url}/objects.inv", f"{CACHE_DIR}/{pkg_name}/objects.inv", ) for pkg_name, doc_url in THIRD_PARTY_INDEXES.items() ) to_download = [(pkg_name, url, path) for pkg_name, url, path in to_download if _is_outdated(path)] if not to_download: print("Nothing to do") return [] print(f"To download {len(to_download)} inventorie(s)") with requests.Session() as session, concurrent.futures.ThreadPoolExecutor(DEFAULT_POOLSIZE) as pool: download_results: Iterator[tuple[str, bool]] = pool.map( _fetch_file, itertools.repeat(session, len(to_download)), (pkg_name for pkg_name, _, _ in to_download), (url for _, url, _ in to_download), (path for _, _, path in to_download), ) failed, success = partition(lambda d: d[1], download_results) failed, success = list(failed), list(success) print(f"Result: {len(success)} success, {len(failed)} failed") if failed: terminate = False print("Failed packages:") for pkg_no, (pkg_name, _) in enumerate(failed, start=1): print(f"{pkg_no}. {pkg_name}") if not terminate and not pkg_name.startswith("apache-airflow"): # For solve situation that newly created Community Provider doesn't upload inventory yet. # And we terminate execution only if any error happen during fetching # third party intersphinx inventories. terminate = True if terminate: print("Terminate execution.") raise SystemExit(1) return [pkg_name for pkg_name, status in failed]
If used in GitHub Action, creates an expandable group in the GitHub Action log. Otherwise, display simple text groups. For more information, see: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#grouping-log-lines
def with_group(title): """ If used in GitHub Action, creates an expandable group in the GitHub Action log. Otherwise, display simple text groups. For more information, see: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#grouping-log-lines """ if os.environ.get("GITHUB_ACTIONS", "false") != "true": print("#" * 20, title, "#" * 20) yield return print(f"::group::{title}") print() yield print("\033[0m") print("::endgroup::")
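A minimal usage sketch, assuming with_group is wrapped with contextlib.contextmanager as the yield-based body implies; the grouped code runs the same way locally and on GitHub Actions, only the surrounding markers differ:

with with_group("Fetching inventories"):
    # Locally this prints "#### Fetching inventories ####"-style markers;
    # on GitHub Actions it emits ::group::/::endgroup:: workflow commands instead.
    print("work happens inside the group")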
Find names of existing operators. :return names of existing operators.
def find_existing_guide_operator_names(src_dir_pattern: str) -> set[str]: """ Find names of existing operators. :return names of existing operators. """ operator_names = set() paths = glob(src_dir_pattern, recursive=True) for path in paths: with open(path) as f: operator_names |= set(re.findall(".. _howto/operator:(.+?):", f.read())) return operator_names
Extracts class definition by name :param ast_tree: AST tree :param class_name: name of the class. :return: class node found
def extract_ast_class_def_by_name(ast_tree, class_name): """ Extracts class definition by name :param ast_tree: AST tree :param class_name: name of the class. :return: class node found """ for node in ast.walk(ast_tree): if isinstance(node, ast.ClassDef) and node.name == class_name: return node return None
Check if there are links to guides in operator's descriptions.
def check_guide_links_in_operator_descriptions() -> list[DocBuildError]: """Check if there are links to guides in operator's descriptions.""" build_errors = [] build_errors.extend( _check_missing_guide_references( operator_names=find_existing_guide_operator_names( f"{DOCS_DIR}/apache-airflow/howto/operator/**/*.rst" ), python_module_paths=itertools.chain( glob(f"{ROOT_PACKAGE_DIR}/operators/*.py"), glob(f"{ROOT_PACKAGE_DIR}/sensors/*.py"), ), ) ) for provider in ALL_PROVIDER_YAMLS: operator_names = { *find_existing_guide_operator_names(f"{DOCS_DIR}/{provider['package-name']}/operators/**/*.rst"), *find_existing_guide_operator_names(f"{DOCS_DIR}/{provider['package-name']}/operators.rst"), } # Extract all potential python modules that can contain operators python_module_paths = itertools.chain( glob(f"{provider['package-dir']}/**/operators/*.py", recursive=True), glob(f"{provider['package-dir']}/**/sensors/*.py", recursive=True), glob(f"{provider['package-dir']}/**/transfers/*.py", recursive=True), ) build_errors.extend( _check_missing_guide_references( operator_names=operator_names, python_module_paths=python_module_paths ) ) return build_errors
Asserts that the file does not contain the pattern. Returns a DocBuildError if it does. :param file_path: file :param pattern: pattern :param message: message to return
def assert_file_not_contains( *, file_path: str, pattern: str, message: str | None = None ) -> DocBuildError | None: """ Asserts that the file does not contain the pattern. Returns a DocBuildError if it does. :param file_path: file :param pattern: pattern :param message: message to return """ return _extract_file_content(file_path, message, pattern, False)
Asserts that the file does contain the pattern. Returns a DocBuildError if it does not. :param file_path: file :param pattern: pattern :param message: message to return
def assert_file_contains(*, file_path: str, pattern: str, message: str | None = None) -> DocBuildError | None: """ Asserts that the file does contain the pattern. Returns a DocBuildError if it does not. :param file_path: file :param pattern: pattern :param message: message to return """ return _extract_file_content(file_path, message, pattern, True)
Filters the file list to those whose content matches the pattern :param file_paths: file paths to check :param pattern: pattern to match :return: list of files matching the pattern
def filter_file_list_by_pattern(file_paths: Iterable[str], pattern: str) -> list[str]: """ Filters the file list to those whose content matches the pattern :param file_paths: file paths to check :param pattern: pattern to match :return: list of files matching the pattern """ output_paths = [] pattern_compiled = re.compile(pattern) for file_path in file_paths: with open(file_path, "rb", 0) as text_file: text_file_content = text_file.read().decode() if re.findall(pattern_compiled, text_file_content): output_paths.append(file_path) return output_paths
Finds all modules. :param deprecated_only: whether only deprecated modules should be found. :return: set of all modules found
def find_modules(deprecated_only: bool = False) -> set[str]: """ Finds all modules. :param deprecated_only: whether only deprecated modules should be found. :return: set of all modules found """ file_paths = glob(f"{ROOT_PACKAGE_DIR}/**/*.py", recursive=True) # Exclude __init__.py file_paths = [f for f in file_paths if not f.endswith("__init__.py")] if deprecated_only: file_paths = filter_file_list_by_pattern(file_paths, r"This module is deprecated.") # Make path relative file_paths = [os.path.relpath(f, ROOT_PROJECT_DIR) for f in file_paths] # Convert filename to module modules_names = {file_path.rpartition(".")[0].replace("/", ".") for file_path in file_paths} return modules_names
Checks all exampleincludes for example dags.
def check_exampleinclude_for_example_dags() -> list[DocBuildError]: """Checks all exampleincludes for example dags.""" all_docs_files = glob(f"{DOCS_DIR}/**/*.rst", recursive=True) build_errors = [] for doc_file in all_docs_files: build_error = assert_file_not_contains( file_path=doc_file, pattern=r"literalinclude::.+(?:example_dags|tests/system/)", message=( "literalinclude directive is prohibited for example DAGs. \n" "You should use the exampleinclude directive to include example DAGs." ), ) if build_error: build_errors.append(build_error) return build_errors
Checks all code:: blocks.
def check_enforce_code_block() -> list[DocBuildError]: """Checks all code:: blocks.""" all_docs_files = glob(f"{DOCS_DIR}/**/*.rst", recursive=True) build_errors = [] for doc_file in all_docs_files: build_error = assert_file_not_contains( file_path=doc_file, pattern=r"^.. code::", message=( "We recommend using the code-block directive instead of the code directive. " "The code-block directive is more feature-full." ), ) if build_error: build_errors.append(build_error) return build_errors
Checks that each documentation for provider packages has a link to PyPI files in the TOC.
def check_pypi_repository_in_provider_tocs() -> list[DocBuildError]: """Checks that each documentation for provider packages has a link to PyPI files in the TOC.""" build_errors = [] for provider in ALL_PROVIDER_YAMLS: doc_file_path = f"{DOCS_DIR}/{provider['package-name']}/index.rst" expected_text = f"PyPI Repository <https://pypi.org/project/{provider['package-name']}/>" build_error = assert_file_contains( file_path=doc_file_path, pattern=re.escape(expected_text), message=( f"A link to the PyPI in table of contents is missing. Can you please add it?\n\n" f" {expected_text}" ), ) if build_error: build_errors.append(build_error) return build_errors
Run all checks from this module
def run_all_check(disable_provider_checks: bool = False) -> list[DocBuildError]: """Run all checks from this module""" general_errors = [] general_errors.extend(check_guide_links_in_operator_descriptions()) general_errors.extend(check_enforce_code_block()) general_errors.extend(check_exampleinclude_for_example_dags()) if not disable_provider_checks: general_errors.extend(check_pypi_repository_in_provider_tocs()) return general_errors
Returns the ids of removed providers.
def get_removed_provider_ids() -> list[str]: """ Returns the ids of removed providers. """ import yaml removed_provider_ids = [] for provider_path in PROVIDERS_DIR.rglob("provider.yaml"): provider_yaml = yaml.safe_load(provider_path.read_text()) if provider_yaml["state"] == "removed": removed_provider_ids.append( provider_yaml["package-name"][len("apache-airflow-providers-") :].replace("-", ".") ) return removed_provider_ids
Filters the package list against a set of filters. A package is returned if it matches at least one filter. The function keeps the order of the packages.
def process_package_filters(available_packages: list[str], package_filters: list[str] | None): """Filters the package list against a set of filters. A package is returned if it matches at least one filter. The function keeps the order of the packages. """ if not package_filters: return available_packages suspended_packages = [ f"apache-airflow-providers-{provider.replace('.','-')}" for provider in get_removed_provider_ids() ] all_packages_with_suspended = available_packages + suspended_packages invalid_filters = [ f for f in package_filters if not any(fnmatch.fnmatch(p, f) for p in all_packages_with_suspended) ] if invalid_filters: raise SystemExit( f"Some filters did not find any package: {invalid_filters}, Please check if they are correct." ) return [p for p in all_packages_with_suspended if any(fnmatch.fnmatch(p, f) for f in package_filters)]
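Filters are glob-style patterns resolved with fnmatch, so a single filter can select a whole family of provider packages; a small sketch with made-up package names:

import fnmatch

available = [
    "apache-airflow",
    "apache-airflow-providers-amazon",
    "apache-airflow-providers-google",
]
filters = ["apache-airflow-providers-*"]

print([p for p in available if any(fnmatch.fnmatch(p, f) for f in filters)])
# ['apache-airflow-providers-amazon', 'apache-airflow-providers-google']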
Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of SpellingError.
def parse_spelling_warnings(warning_text: str, docs_dir: str) -> list[SpellingError]: """ Parses warnings from Sphinx. :param warning_text: warning to parse :param docs_dir: documentation directory :return: list of SpellingError. """ sphinx_spelling_errors = [] for sphinx_warning in warning_text.splitlines(): if not sphinx_warning: continue warning_parts = None match = re.search(r"(.*):(\w*):\s\((\w*)\)\s?(\w*)\s?(.*)", sphinx_warning) if match: warning_parts = match.groups() if warning_parts and len(warning_parts) == 5: try: sphinx_spelling_errors.append( SpellingError( file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]) if warning_parts[1] not in ("None", "") else None, spelling=warning_parts[2], suggestion=warning_parts[3] if warning_parts[3] else None, context_line=warning_parts[4], message=sphinx_warning, ) ) except Exception: # If an exception occurred while parsing the warning message, display the raw warning message. sphinx_spelling_errors.append( SpellingError( file_path=None, line_no=None, spelling=None, suggestion=None, context_line=None, message=sphinx_warning, ) ) else: sphinx_spelling_errors.append( SpellingError( file_path=None, line_no=None, spelling=None, suggestion=None, context_line=None, message=sphinx_warning, ) ) return sphinx_spelling_errors
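A fabricated sphinxcontrib-spelling warning, showing what the regular expression above extracts into the SpellingError fields (the exact warning layout is an assumption for illustration):

import re

warning = 'apache-airflow/index.rst:10: (Airfow) Airflow ["Airflow documentation"]'
match = re.search(r"(.*):(\w*):\s\((\w*)\)\s?(\w*)\s?(.*)", warning)
print(match.groups())
# ('apache-airflow/index.rst', '10', 'Airfow', 'Airflow', '["Airflow documentation"]')
#  file_path                   line  spelling  suggestion context_line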
Displays summary of spelling errors
def display_spelling_error_summary(spelling_errors: dict[str, list[SpellingError]]) -> None:
    """Displays summary of spelling errors"""
    console.print()
    console.print("[red]" + "#" * 30 + " Start spelling errors summary " + "#" * 30 + "[/]")
    console.print()

    for package_name, errors in sorted(spelling_errors.items()):
        if package_name:
            console.print("=" * 30, f" [info]{package_name}[/] ", "=" * 30)
        else:
            console.print("=" * 30, " [info]General[/] ", "=" * 30)

        for warning_no, error in enumerate(sorted(errors), 1):
            console.print("-" * 30, f"Error {warning_no:3}", "-" * 30)

            _display_error(error)

    console.print("=" * 100)
    console.print()
    msg = """[green]
If there are spelling errors related to class or function names, make sure those names are
quoted with backticks '`' - this should exclude them from the spellcheck process.

If there are spelling errors in the summary above, and the spelling is correct, add the spelling
to docs/spelling_wordlist.txt or use the spelling directive.
Check https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#private-dictionaries
for more details.

If there are no spelling errors in the summary above, there might be an issue unrelated to spelling.
Please review the traceback.
    """
    console.print(msg)
    console.print()
    console.print("[red]" + "#" * 30 + " End spelling errors summary " + "#" * 30 + "[/]")
    console.print()
Returns number of wrong checkout instructions in the workflow file
def check_file(the_file: Path) -> int:
    """Returns number of wrong checkout instructions in the workflow file"""
    error_num = 0
    res = yaml.safe_load(the_file.read_text())
    console.print(f"Checking file [yellow]{the_file}[/]")
    for job in res["jobs"].values():
        if job.get("steps") is None:
            continue
        for step in job["steps"]:
            uses = step.get("uses")
            pretty_step = yaml.safe_dump(step, indent=2)
            if uses is not None and uses.startswith("actions/checkout"):
                with_clause = step.get("with")
                if with_clause is None:
                    console.print(f"\n[red]The `with` clause is missing in step:[/]\n\n{pretty_step}")
                    error_num += 1
                    continue
                path = with_clause.get("path")
                if path == "constraints":
                    # This is a special case - we are ok with persisting credentials in the constraints
                    # step, because we need them to push constraints back to the repository in the "canary"
                    # build. This is ok for security, because we are pushing it only in the `main` branch
                    # of the repository and only for the unprotected constraints branch.
                    continue
                persist_credentials = with_clause.get("persist-credentials")
                if persist_credentials is None:
                    console.print(
                        "\n[red]The `with` clause does not have persist-credentials in step:[/]"
                        f"\n\n{pretty_step}"
                    )
                    error_num += 1
                    continue
                if persist_credentials:
                    console.print(
                        "\n[red]The `with` clause has persist-credentials set to True in step:[/]"
                        f"\n\n{pretty_step}"
                    )
                    error_num += 1
                    continue
    return error_num
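Hypothetical step definitions illustrating what the check accepts and what it flags, written as already-parsed dictionaries (the action versions are illustrative):

ok_step = {"uses": "actions/checkout@v4", "with": {"persist-credentials": False}}         # passes
constraints_step = {"uses": "actions/checkout@v4", "with": {"path": "constraints"}}       # allowed special case
missing_with_step = {"uses": "actions/checkout@v4"}                                       # flagged: no `with` clause
persisting_step = {"uses": "actions/checkout@v4", "with": {"persist-credentials": True}}  # flagged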
Return the classes declared in the given Python file.
def get_classes(file_path: str) -> Iterable[ast.ClassDef]:
    """Return the classes declared in the given Python file."""
    pathlib_path = pathlib.Path(file_path)
    module = ast.parse(pathlib_path.read_text("utf-8"), str(pathlib_path))
    for node in ast.walk(module):
        if isinstance(node, ast.ClassDef):
            yield node
Return True if the given class definition directly subclasses DbApiHook.
def is_subclass_of_dbapihook(node: ast.ClassDef) -> bool:
    """Return True if the given class definition directly subclasses DbApiHook."""
    for base in node.bases:
        if isinstance(base, ast.Name) and base.id == "DbApiHook":
            return True
    return False
Return True if the given class implements `_make_common_data_structure` method.
def has_make_serializable_method(node: ast.ClassDef) -> bool: """Return True if the given class implements `_make_common_data_structure` method.""" for body_element in node.body: if isinstance(body_element, ast.FunctionDef) and (body_element.name == MAKE_COMMON_METHOD_NAME): return True return False
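A minimal sketch of the two AST helpers in action, using a made-up hook class (it assumes MAKE_COMMON_METHOD_NAME is the module constant holding "_make_common_data_structure", as the docstring above suggests):

import ast

source = '''
class MyHook(DbApiHook):
    def _make_common_data_structure(self, result):
        return result
'''
node = next(n for n in ast.walk(ast.parse(source)) if isinstance(n, ast.ClassDef))
print(is_subclass_of_dbapihook(node))      # True
print(has_make_serializable_method(node))  # True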
Determine the path of the provider.yaml file related to the given python file.
def determine_provider_yaml_path(file_path: str) -> str: """Determine the path of the provider.yaml file related to the given python file.""" return f"{file_path.split('/hooks')[0]}/provider.yaml"
Load content of a YAML file.
def get_yaml_content(file_path: str) -> dict:
    """Load content of a YAML file."""
    with open(file_path) as file:
        return yaml.safe_load(file)
Return the version constraints of `apache-airflow-providers-common-sql`.
def get_common_sql_constraints(provider_metadata: dict) -> str | None: """Return the version constraints of `apache-airflow-providers-common-sql`.""" dependencies: list[str] = provider_metadata["dependencies"] for dependency in dependencies: if dependency.startswith(COMMON_SQL_PROVIDER_NAME): return dependency[len(COMMON_SQL_PROVIDER_NAME) :] return None
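A quick illustration of how the version specifier is sliced off the dependency entry. The metadata below is made up, and COMMON_SQL_PROVIDER_NAME is assumed to be "apache-airflow-providers-common-sql":

metadata = {
    "dependencies": [
        "apache-airflow>=2.7.0",
        "apache-airflow-providers-common-sql>=1.8.1",
    ]
}
print(get_common_sql_constraints(metadata))  # '>=1.8.1'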
Check if the `version` specifier is constrained to at least >= 1.8.1.
def do_version_satisfies_constraints(
    version: str,
    max_incompatible_version=COMMON_SQL_PROVIDER_LATEST_INCOMPATIBLE_VERSION,
) -> bool:
    """Check if the `version` specifier is constrained to at least >= 1.8.1."""
    constraints: list[str] = [constraint.strip() for constraint in version.split(",")]
    specifier_set = SpecifierSet(",".join(constraints))
    return not specifier_set.contains(max_incompatible_version)
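In other words, the check passes only when the specifier excludes the latest incompatible release. A small sketch with packaging.specifiers; the version "1.8.0" is only an illustrative stand-in for that release:

from packaging.specifiers import SpecifierSet

print(not SpecifierSet(">=1.8.1").contains("1.8.0"))  # True  - 1.8.0 is excluded, constraint is fine
print(not SpecifierSet(">=1.3.0").contains("1.8.0"))  # False - 1.8.0 is still allowed, too loose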
Check whether default is 'conf.getboolean("operators", "default_deferrable", fallback=False)'
def _is_valid_deferrable_default(default: ast.AST) -> bool: """Check whether default is 'conf.getboolean("operators", "default_deferrable", fallback=False)'""" if not isinstance(default, ast.Call): return False # Not a function call. # Check the function callee is exactly 'conf.getboolean'. call_to_conf_getboolean = ( isinstance(default.func, ast.Attribute) and isinstance(default.func.value, ast.Name) and default.func.value.id == "conf" and default.func.attr == "getboolean" ) if not call_to_conf_getboolean: return False # Check arguments. return ( len(default.args) == 2 and isinstance(default.args[0], ast.Constant) and default.args[0].value == "operators" and isinstance(default.args[1], ast.Constant) and default.args[1].value == "default_deferrable" and len(default.keywords) == 1 and default.keywords[0].arg == "fallback" and isinstance(default.keywords[0].value, ast.Constant) and default.keywords[0].value.value is False )
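A short sketch showing the validator against a made-up operator signature:

import ast

source = (
    "def __init__(self, deferrable="
    'conf.getboolean("operators", "default_deferrable", fallback=False)): ...'
)
func = ast.parse(source).body[0]
print(_is_valid_deferrable_default(func.args.defaults[0]))                 # True
print(_is_valid_deferrable_default(ast.parse("False", mode="eval").body))  # False - plain constant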
Get list of integrations from matching filenames.
def get_ci_integrations(
    tests_path: Path = INTEGRATION_TESTS_PATH,
    integration_prefix: str = INTEGRATION_TEST_PREFIX,
) -> dict[str, Path]:
    """Get list of integrations from matching filenames."""
    if not tests_path.exists() or not tests_path.is_dir():
        console.print(f"[red]Bad tests path: {tests_path}. [/]")
        sys.exit(1)

    integrations_files = list(tests_path.glob(integration_prefix))
    if len(integrations_files) == 0:
        console.print(
            f"[red]No integrations found. "
            f"Pattern '{integration_prefix}' did not match any files under {tests_path}. [/]"
        )
        sys.exit(1)

    # parse into list of ids
    integrations = {}
    for _i in integrations_files:
        try:
            _key = _i.stem.split("-")[1]
            integrations[_key] = _i
        except IndexError:
            console.print(f"[red]Tried to parse {_i.stem}, but it did not contain the '-' separator. [/]")
            continue

    return integrations
Get integrations listed in docs.
def get_docs_integrations(docs_path: Path = DOCUMENTATION_PATH): """Get integrations listed in docs.""" table_lines = [] _list_start_line = None with open(docs_path, encoding="utf8") as f: for line_n, line in enumerate(f): if DOCS_MARKER_END in line: break if DOCS_MARKER_START in line: _list_start_line = line_n if _list_start_line is None: continue if line_n > _list_start_line: table_lines.append(line) if len(table_lines) == 0: console.print("[red]No integrations table in docs.[/]") sys.exit(1) table_cells = [] for line in table_lines: m = re.findall(_LIST_MATCH, line) if len(m) == 0: continue table_cells.append(m[0].strip("|").strip()) def _list_matcher(j): """Filter callable to exclude header and empty cells.""" if len(j) == 0: return False elif j in ["Description", "Identifier"]: return False else: return True table_cells = list(filter(_list_matcher, table_cells)) return table_cells
Generate docs table.
def update_integration_tests_array(contents: dict[str, list[str]]): """Generate docs table.""" rows = [] sorted_contents = dict(sorted(contents.items())) for integration, description in sorted_contents.items(): formatted_hook_description = ( description[0] if len(description) == 1 else "* " + "\n* ".join(description) ) rows.append((integration, formatted_hook_description)) formatted_table = "\n" + tabulate(rows, tablefmt="grid", headers=("Identifier", "Description")) + "\n\n" insert_documentation( file_path=AIRFLOW_SOURCES_ROOT_PATH / "contributing-docs" / "testing" / "integration_tests.rst", content=formatted_table.splitlines(keepends=True), header=DOCS_MARKER_START, footer=DOCS_MARKER_END, )
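The table ends up in integration_tests.rst in the grid format produced by tabulate; a tiny example with hypothetical integrations and descriptions:

from tabulate import tabulate

rows = [
    ("kerberos", "* Kerberos integration"),
    ("mongo", "* MongoDB client\n* MongoDB server"),
]
print(tabulate(rows, tablefmt="grid", headers=("Identifier", "Description")))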
Extract all breeze.description labels per image.
def _get_breeze_description(parsed_compose: dict[str, Any], label_key: str = "breeze.description"):
    """Extract all breeze.description labels per image."""
    image_label_map = {}
    # possible key error handled outside
    for _img_name, img in parsed_compose["services"].items():
        try:
            for _label_name, label in img["labels"].items():
                if _label_name == label_key:
                    image_label_map[_img_name] = label
        except KeyError:
            # service has no 'labels' entry
            continue
    return image_label_map
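A small sketch with a hypothetical parsed compose structure, showing that services without labels are silently skipped:

compose = {
    "services": {
        "mongo": {"labels": {"breeze.description": "MongoDB server"}},
        "helper": {},  # no 'labels' entry - skipped
    }
}
print(_get_breeze_description(compose))  # {'mongo': 'MongoDB server'}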
Pull breeze description from docker-compose files.
def get_integration_descriptions(integrations: dict[str, Path]) -> dict[str, list[Any]]: """Pull breeze description from docker-compose files.""" table = {} for integration, path in integrations.items(): with open(path) as f: _compose = yaml.safe_load(f) try: _labels = _get_breeze_description(_compose) except KeyError: console.print(f"[red]No 'services' entry in compose file {path}.[/]") sys.exit(1) table[integration] = list(_labels.values()) return table
Renders template based on its name. Reads the template from <name>_TEMPLATE<extension>.jinja2 in the given search path. :param searchpath: Path to search templates in :param template_name: name of the template to use :param context: Jinja2 context :param extension: Target file extension :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template
def render_template(
    searchpath: Path,
    template_name: str,
    context: dict[str, Any],
    extension: str,
    autoescape: bool = True,
    keep_trailing_newline: bool = False,
) -> str:
    """
    Renders template based on its name. Reads the template from
    <name>_TEMPLATE<extension>.jinja2 in the given search path.

    :param searchpath: Path to search templates in
    :param template_name: name of the template to use
    :param context: Jinja2 context
    :param extension: Target file extension
    :param autoescape: Whether to autoescape HTML
    :param keep_trailing_newline: Whether to keep the newline in rendered output
    :return: rendered template
    """
    import jinja2

    template_loader = jinja2.FileSystemLoader(searchpath=searchpath)
    template_env = jinja2.Environment(
        loader=template_loader,
        undefined=jinja2.StrictUndefined,
        autoescape=autoescape,
        keep_trailing_newline=keep_trailing_newline,
    )
    template = template_env.get_template(f"{template_name}_TEMPLATE{extension}.jinja2")
    content: str = template.render(context)
    return content
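A usage sketch; the template name, its contents, and the temporary directory are made up for illustration:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    template_dir = Path(tmp)
    (template_dir / "README_TEMPLATE.md.jinja2").write_text("Hello {{ name }}!")
    rendered = render_template(
        searchpath=template_dir,
        template_name="README",
        context={"name": "Airflow"},
        extension=".md",
        autoescape=False,
    )
    print(rendered)  # Hello Airflow!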
Allow content to be defined with leading empty lines and strip/add EOL
def process_content_to_write(content: str) -> str: """Allow content to be defined with leading empty lines and strip/add EOL""" if not content: return content content_lines = content.splitlines() if content_lines and content_lines[0] == "": content_lines = content_lines[1:] content_to_write = "\n".join(content_lines) + "\n" return content_to_write
Pre-process files passed to mypy. * When running build on non-main branch do not take providers into account. * When running "airflow/providers" package, then we need to add --namespace-packages flag. * When running "airflow" package, then we need to exclude providers.
def pre_process_files(files: list[str]) -> list[str]:
    """Pre-process files passed to mypy.

    * When running build on non-main branch do not take providers into account.
    * When running "airflow/providers" package, then we need to add --namespace-packages flag.
    * When running "airflow" package, then we need to exclude providers.
    """
    default_branch = os.environ.get("DEFAULT_BRANCH")
    if not default_branch or default_branch == "main":
        return files
    result = [file for file in files if not file.startswith(f"airflow{os.sep}providers")]
    if "airflow/providers" in files:
        if len(files) > 1:
            raise RuntimeError(
                "When running `airflow/providers` package, you cannot run any other packages because only "
                "the airflow/providers package requires the --namespace-packages flag to be set"
            )
        result.append("--namespace-packages")
    if "airflow" in files:
        if len(files) > 1:
            raise RuntimeError(
                "When running `airflow` package, you cannot run any other packages because only "
                "the airflow package requires the --exclude airflow/providers/.* flag to be set"
            )
        result.extend(["--exclude", "airflow/providers/.*"])
    return result
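A rough sketch of the non-main-branch behaviour; the branch name and file paths are illustrative, and a POSIX path separator is assumed:

import os

os.environ["DEFAULT_BRANCH"] = "v2-10-stable"  # any non-main value triggers the provider filtering
files = ["airflow/providers/google/hooks/gcs.py", "airflow/models/dag.py"]
print(pre_process_files(files))  # ['airflow/models/dag.py'] - provider files are dropped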
Split extras into four types. :return: dictionary mapping each extra type description to a text-wrapped list of extras
def get_extra_types_dict() -> dict[str, list[str]]:
    """
    Split extras into four types.

    :return: dictionary mapping each extra type description to a text-wrapped list of extras
    """
    extra_type_dict: dict[str, list[str]] = {}

    for extra_dict, extra_description in ALL_DYNAMIC_EXTRA_DICTS:
        extra_list = sorted(extra_dict)
        if extra_dict == BUNDLE_EXTRAS:
            extra_list = sorted(extra_list + ALL_GENERATED_BUNDLE_EXTRAS)
        extra_type_dict[extra_description] = get_wrapped_list(extra_list)
    extra_type_dict["Provider extras"] = get_wrapped_list(PROVIDER_DEPENDENCIES)
    return extra_type_dict
Return full path to the cache dir for this application (under the .build directory of the Airflow sources)
def _cache_dir():
    """Return full path to the cache dir for this application (under the .build directory of the Airflow sources)"""
    path = os.path.join(AIRFLOW_SOURCES_DIR, ".build", "cache")
    os.makedirs(path, exist_ok=True)
    return path
Fetch URL to local cache and return the path.
def fetch_and_cache(url: str, output_filename: str):
    """Fetch URL to local cache and return the path."""
    cache_key = _gethash(url)
    cache_dir = _cache_dir()
    cache_metadata_filepath = os.path.join(cache_dir, "cache-metadata.json")
    cache_filepath = os.path.join(cache_dir, f"{cache_key}-{output_filename[:64]}")
    # Create cache directory
    os.makedirs(cache_dir, exist_ok=True)
    # Load cache metadata
    cache_metadata: dict[str, str] = {}
    if os.path.exists(cache_metadata_filepath):
        try:
            with open(cache_metadata_filepath) as cache_file:
                cache_metadata = json.load(cache_file)
        except json.JSONDecodeError:
            os.remove(cache_metadata_filepath)
    etag = cache_metadata.get(cache_key)

    # If we have a file and etag, check the fast path
    if os.path.exists(cache_filepath) and etag:
        res = requests.get(url, headers={"If-None-Match": etag})
        if res.status_code == 304:
            return cache_filepath

    # Slow path
    res = requests.get(url)
    res.raise_for_status()

    with open(cache_filepath, "wb") as output_file:
        output_file.write(res.content)

    # Save cache metadata, if needed
    etag = res.headers.get("etag", None)
    if etag:
        cache_metadata[cache_key] = etag
        with open(cache_metadata_filepath, "w") as cache_file:
            json.dump(cache_metadata, cache_file)

    return cache_filepath
Loads a file, picking the parser based on the file extension
def load_file(file_path: str):
    """Loads a file, picking the parser based on the file extension"""
    if file_path.lower().endswith(".json"):
        with open(file_path) as input_file:
            return json.load(input_file)
    elif file_path.lower().endswith((".yaml", ".yml")):
        with open(file_path) as input_file:
            return yaml.safe_load(input_file)
    raise _ValidatorError("Unknown file format. Supported extensions: '.yaml', '.yml', '.json'")
Main code
def main() -> int: """Main code""" parser = _get_parser() args = parser.parse_args() spec_url = args.spec_url spec_file = args.spec_file enforce_defaults = args.enforce_defaults schema = _load_spec(spec_file, spec_url) validator = _create_validator(schema, enforce_defaults) file_paths = args.file exit_code = _process_files(validator, file_paths) return exit_code
Whether an expression is NEW_SESSION. Old code written before the introduction of NEW_SESSION (and even some new code, if the contributor wasn't aware of the addition) generally uses None as the default value, so we add that to the check as well.
def _is_new_session_or_none(value: ast.expr) -> _SessionDefault | None:
    """Whether an expression is NEW_SESSION.

    Old code written before the introduction of NEW_SESSION (and even some new code, if the
    contributor wasn't aware of the addition) generally uses None as the default value, so we
    add that to the check as well.
    """
    if isinstance(value, ast.Constant) and value.value is None:
        return _SessionDefault.none
    if isinstance(value, ast.Name) and value.id == "NEW_SESSION":
        return _SessionDefault.new_session
    # It's possible to do FOO = NEW_SESSION and reference FOO to work around
    # this check, but let's rely on reviewers to catch this kind of shenanigans.
    return None
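A short sketch against made-up signatures (it relies on the _SessionDefault enum defined alongside this helper):

import ast

sig = ast.parse("def f(session=NEW_SESSION): ...").body[0]
print(_is_new_session_or_none(sig.args.defaults[0]))                 # _SessionDefault.new_session
print(_is_new_session_or_none(ast.parse("None", mode="eval").body))  # _SessionDefault.none
print(_is_new_session_or_none(ast.parse("42", mode="eval").body))    # None - neither form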