Whether expected decorators are provided. Three decorators would allow NEW_SESSION usages: * ``@provide_session``: The canonical case. * ``@overload``: A typing overload and not something to actually execute. * ``@abstractmethod``: This will be overridden in a subclass anyway.
def _is_decorated_correctly(nodes: list[ast.expr]) -> bool:
    """Whether expected decorators are provided.

    Three decorators would allow NEW_SESSION usages:

    * ``@provide_session``: The canonical case.
    * ``@overload``: A typing overload and not something to actually execute.
    * ``@abstractmethod``: This will be overridden in a subclass anyway.
    """
    # This only accepts those decorators literally. Should be enough?
    return any(isinstance(node, ast.Name) and node.id in _ALLOWED_DECORATOR_NAMES for node in nodes)
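A minimal usage sketch, assuming ``_ALLOWED_DECORATOR_NAMES`` contains the three names listed above (the constant itself lives elsewhere in the script):

import ast

# _ALLOWED_DECORATOR_NAMES is assumed to be defined at module level as:
# {"provide_session", "overload", "abstractmethod"}
source = """
@provide_session
def get_task(task_id, session=NEW_SESSION):
    ...
"""
func = ast.parse(source).body[0]
print(_is_decorated_correctly(func.decorator_list))  # True
# Note: only bare ast.Name decorators count, so ``@typing.overload``
# (an ast.Attribute) would not be recognised by this check.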
Check NEW_SESSION usages outside functions decorated with provide_session.
def _iter_incorrect_new_session_usages(path: pathlib.Path) -> typing.Iterator[ast.FunctionDef]:
    """Check NEW_SESSION usages outside functions decorated with provide_session."""
    for node in ast.walk(ast.parse(path.read_text("utf-8"), str(path))):
        if not isinstance(node, ast.FunctionDef):
            continue
        session = _get_session_arg_and_default(node.args)
        if session is None or session.default is None:
            continue  # No session argument or the argument has no default, we're good.
        if _is_decorated_correctly(node.decorator_list):
            continue  # Has @provide_session so the default is expected.
        default_kind = _is_new_session_or_none(session.default)
        if default_kind is None:
            continue  # Default value is not NEW_SESSION or None.
        if default_kind == _SessionDefault.none and _annotation_has_none(session.argument.annotation):
            continue  # None is OK if the argument is explicitly typed as None.
        yield node
Find definition of the ``DAG`` class's ``__init__``.
def _find_dag_init(mod: ast.Module) -> ast.FunctionDef:
    """Find definition of the ``DAG`` class's ``__init__``."""
    dag_class = next(n for n in ast.iter_child_nodes(mod) if isinstance(n, ast.ClassDef) and n.name == "DAG")
    return next(
        node
        for node in ast.iter_child_nodes(dag_class)
        if isinstance(node, ast.FunctionDef) and node.name == "__init__"
    )
Find definition of the ``@dag`` decorator.
def _find_dag_deco(mod: ast.Module) -> ast.FunctionDef:
    """Find definition of the ``@dag`` decorator."""
    return next(n for n in ast.iter_child_nodes(mod) if isinstance(n, ast.FunctionDef) and n.name == "dag")
Find definition of the ``TaskGroup`` class's ``__init__``.
def _find_tg_init(mod: ast.Module) -> ast.FunctionDef:
    """Find definition of the ``TaskGroup`` class's ``__init__``."""
    task_group_class = next(
        node
        for node in ast.iter_child_nodes(mod)
        if isinstance(node, ast.ClassDef) and node.name == "TaskGroup"
    )
    return next(
        node
        for node in ast.iter_child_nodes(task_group_class)
        if isinstance(node, ast.FunctionDef) and node.name == "__init__"
    )
Find definition of the ``@task_group`` decorator. The decorator has multiple overloads, but we want the first one, which contains task group init arguments.
def _find_tg_deco(mod: ast.Module) -> ast.FunctionDef:
    """Find definition of the ``@task_group`` decorator.

    The decorator has multiple overloads, but we want the first one, which contains
    task group init arguments.
    """
    return next(
        node
        for node in ast.iter_child_nodes(mod)
        if isinstance(node, ast.FunctionDef) and node.name == "task_group"
    )
Retrieves the hash of the pyproject.toml file. This is used to determine whether we need to upgrade Breeze because some setup files have changed. The Blake2b algorithm will not be flagged by security checkers as an insecure algorithm (in Python 3.9 and above we could pass `usedforsecurity=False` to allow a less secure algorithm, but for now it is better to simply use a more secure one).
def get_package_setup_metadata_hash() -> str:
    """
    Retrieves the hash of the pyproject.toml file.

    This is used to determine whether we need to upgrade Breeze because some setup files have changed.
    The Blake2b algorithm will not be flagged by security checkers as an insecure algorithm
    (in Python 3.9 and above we could pass `usedforsecurity=False` to allow a less secure algorithm,
    but for now it is better to simply use a more secure one).
    """
    try:
        the_hash = hashlib.new("blake2b")
        the_hash.update((BREEZE_SOURCES_ROOT / "pyproject.toml").read_bytes())
        return the_hash.hexdigest()
    except FileNotFoundError as e:
        return f"Missing file {e.filename}"
Returns a summary of the changes. :param results: results of the comparison, as a list of diff lines :return: Tuple: [number of removed lines, number of added lines]
def summarize_changes(results: list[str]) -> tuple[int, int]:
    """
    Returns a summary of the changes.

    :param results: results of the comparison, as a list of diff lines
    :return: Tuple: [number of removed lines, number of added lines]
    """
    removals, additions = 0, 0
    for line in results:
        if line.startswith(("+", "[green]+")) and not line.startswith(("+#", "[green]+#")):
            # Skip additions of comments when counting additions
            additions += 1
        if line.startswith(("-", "[red]-")) and not line.startswith(("-#", "[red]-#")):
            # Skip removals of comments when counting removals
            removals += 1
    return removals, additions
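A small usage sketch with made-up diff lines, assuming bare ``+``/``-`` prefixes with no following space (as the colour-less branch above expects):

diff_lines = [
    "-apache-airflow==2.8.0",
    "-# pinned for CI only",   # a removed comment is not counted
    "+apache-airflow==2.9.0",
    "+rich==13.7.0",
]
removals, additions = summarize_changes(diff_lines)
print(removals, additions)  # 1 2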
Post-process a line of the stub file. Stubgen is not a perfect tool for generating stub files, but it is a good starting point. We have to modify the stub files to make them more useful for us (as the stubgen developers are not very open to adding more options or features that are not very generic). The patching that we currently perform: * we add noqa to Incomplete imports from _typeshed (IntelliJ does not like the _typeshed import) * we add historically published methods * we fix missing Union imports (see https://github.com/python/mypy/issues/12929) :param stub_file_path: path of the file we process :param line: line to post-process :param new_lines: list to which post-processed lines are appended
def post_process_line(stub_file_path: Path, line: str, new_lines: list[str]) -> None: """ Post process line of the stub file. Stubgen is not a perfect tool for generating stub files, but it is good starting point. We have to modify the stub files to make them more useful for us (as the approach of stubgen developers is not very open to add more options or features that are not very generic). The patching that we currently perform: * we add noqa to Incomplete imports from _typeshed (IntelliJ _typeshed does not like it) * we add historically published methods * fixes missing Union imports (see https://github.com/python/mypy/issues/12929) :param stub_file_path: path of the file we process :param line: line to post-process :param new_lines: new_lines - this is where we add post-processed lines """ if stub_file_path.relative_to(OUT_DIR_PROVIDERS) == Path("common") / "sql" / "operators" / "sql.pyi": stripped_line = line.strip() if stripped_line.startswith("parse_boolean: Incomplete"): # Handle Special case - historically we allow _parse_boolean to be part of the public API, # and we handle it via parse_boolean = _parse_boolean which produces Incomplete entry in the # stub - we replace the Incomplete method with both API methods that should be allowed. # We also strip empty lines to let black figure out where to add them. # # We can remove those when we determine it is not risky for the community - when we determine # That most of the historically released providers have a way to easily update them, and they # are likely not used due to long time of the "min-airflow-version" presence. Note that # currently we have no "hard" criteria on when we can remove those - even if we bump common.sql # provider to 2.*, the old providers (mainly google providers) might still use them. new_lines.append("def _parse_boolean(val: str) -> str | bool: ...") new_lines.append("def parse_boolean(val: str) -> str | bool: ...") elif stripped_line == "class SQLExecuteQueryOperator(BaseSQLOperator):": # The "_raise_exception" method is really part of the public API and should not be removed new_lines.append(line) new_lines.append(" def _raise_exception(self, exception_string: str) -> Incomplete: ...") elif stripped_line == "": pass else: new_lines.append(line) else: new_lines.append(line)
Post process the stub file: * adding (or replacing) the preamble (makes sure we can replace the preamble with a new one in old files) * optionally patching the generated file :param module_name: name of the module of the file :param stub_file_path: path of the stub file :param lines: lines that were read from the file (with stripped comments) :param patch_generated_file: whether we should patch the generated file :return: resulting lines of the file after post-processing
def post_process_generated_stub_file(
    module_name: str, stub_file_path: Path, lines: list[str], patch_generated_file=False
):
    """
    Post process the stub file:

    * adding (or replacing) the preamble (makes sure we can replace the preamble with a new one in old files)
    * optionally patching the generated file

    :param module_name: name of the module of the file
    :param stub_file_path: path of the stub file
    :param lines: lines that were read from the file (with stripped comments)
    :param patch_generated_file: whether we should patch the generated file
    :return: resulting lines of the file after post-processing
    """
    template = jinja2.Template(PREAMBLE)
    new_lines = template.render(module_name=module_name).splitlines()
    for line in lines:
        if patch_generated_file:
            post_process_line(stub_file_path, line, new_lines)
        else:
            new_lines.append(line)
    return new_lines
Writes the content to the file. :param pyi_file_path: path of the file to write :param content: content to write (will be properly formatted)
def write_pyi_file(pyi_file_path: Path, content: str) -> None:
    """
    Writes the content to the file.

    :param pyi_file_path: path of the file to write
    :param content: content to write (will be properly formatted)
    """
    pyi_file_path.write_text(black_format(content, is_pyi=True), encoding="utf-8")
Reads stub file content with post-processing and optionally patching historical methods. The comments and the initial docstring are stripped and the preamble is always added. This makes sure that we can update the preamble and have it automatically updated in generated files even if the API specification does not change. If None is returned, the file should be deleted. :param module_name: name of the module in question :param pyi_file_path: the path of the file to read :param patch_generated_files: whether the historical methods should be patched :return: list of lines of post-processed content, or None if the file should be deleted.
def read_pyi_file_content( module_name: str, pyi_file_path: Path, patch_generated_files=False ) -> list[str] | None: """ Reads stub file content with post-processing and optionally patching historical methods. The comments and initial javadoc are stripped and preamble is always added. It makes sure that we can update the preamble and have it automatically updated in generated files even if no API specification changes. If None is returned, the file should be deleted. :param module_name: name of the module in question :param pyi_file_path: the path of the file to read :param patch_generated_files: whether the historical methods should be patched :return: list of lines of post-processed content or None if the file should be deleted. """ lines_no_comments = [ line for line in pyi_file_path.read_text(encoding="utf-8").splitlines() if line.strip() and not line.strip().startswith("#") ] remove_docstring = False lines = [] for line in lines_no_comments: if line.strip().startswith('"""'): remove_docstring = not remove_docstring elif not remove_docstring: lines.append(line) if (pyi_file_path.name == "__init__.pyi") and lines == []: console.print(f"[yellow]Skip {pyi_file_path} as it is an empty stub for __init__.py file") return None return post_process_generated_stub_file( module_name, pyi_file_path, lines, patch_generated_file=patch_generated_files )
Compares the generated stub with the stored file and summarises how many lines would change. :param generated_stub_path: path of the stub that has been generated :param force_override: whether to override the API stubs even if there are removals :return: Tuple: [number of removed lines, number of added lines]
def compare_stub_files(generated_stub_path: Path, force_override: bool) -> tuple[int, int]: """ Compare generated with stored files and returns True in case some modifications are needed. :param generated_stub_path: path of the stub that has been generated :param force_override: whether to override the API stubs even if there are removals :return: True if some updates were detected """ _removals, _additions = 0, 0 rel_path = generated_stub_path.relative_to(OUT_DIR_PROVIDERS) stub_file_target_path = PROVIDERS_ROOT / rel_path if stub_file_target_path.name == "__init__.pyi": return _removals, _additions module_name = "airflow.providers." + os.fspath(rel_path.with_suffix("")).replace(os.path.sep, ".") generated_pyi_content = read_pyi_file_content( module_name, generated_stub_path, patch_generated_files=True ) if generated_pyi_content is None: generated_stub_path.unlink() if stub_file_target_path.exists(): console.print( f"[red]The {stub_file_target_path} file is missing in generated files: " "but we are deleting it because it is an empty __init__.pyi file." ) if _force_override: console.print( f"[yellow]The file {stub_file_target_path} has been removed " "as changes are force-overridden" ) stub_file_target_path.unlink() return 1, 0 else: console.print( f"[blue]The {generated_stub_path} file is an empty __init__.pyi file, we just ignore it." ) return 0, 0 if not stub_file_target_path.exists(): console.print(f"[yellow]New file {stub_file_target_path} has been missing. Treated as addition.") write_pyi_file(stub_file_target_path, "\n".join(generated_pyi_content) + "\n") return 0, 1 target_pyi_content = read_pyi_file_content( module_name, stub_file_target_path, patch_generated_files=False ) if target_pyi_content is None: target_pyi_content = [] if generated_pyi_content != target_pyi_content: console.print(f"[yellow]The {stub_file_target_path} has changed.") diff = ConsoleDiff() comparison_results = list(diff.compare(target_pyi_content, generated_pyi_content)) _removals, _additions = summarize_changes(comparison_results) console.print( "[bright_blue]Summary of the generated changes in common.sql " f"stub API file {stub_file_target_path}:[/]\n" ) console.print(textwrap.indent("\n".join(comparison_results), " " * 4)) if _removals == 0 or force_override: console.print(f"[yellow]The {stub_file_target_path} has been updated\n") console.print(f"[yellow]* additions: {total_additions}[/]") console.print(f"[yellow]* removals: {total_removals}[/]") write_pyi_file(stub_file_target_path, "\n".join(generated_pyi_content) + "\n") console.print( f"\n[bright_blue]The {stub_file_target_path} file has been updated automatically.[/]\n" "\n[yellow]Make sure to commit the changes.[/]" ) else: if force_override: write_pyi_file(stub_file_target_path, "\n".join(generated_pyi_content) + "\n") console.print( f"\n[bright_blue]The {stub_file_target_path} file has been updated automatically.[/]\n" "\n[yellow]Make sure to commit the changes.[/]" ) else: console.print(f"[green]OK. The {stub_file_target_path} has not changed.") return _removals, _additions
Check if a given class node is an operator, based on the string suffix of the base IDs (ends with "BaseOperator"). TODO: Enhance this function to work with nested inheritance trees through dynamic imports. :param class_node: The class node to check. :return: True if the class definition is of an operator, False otherwise.
def _is_operator(class_node: ast.ClassDef) -> bool:
    """
    Check if a given class node is an operator, based on the string suffix of the base IDs
    (ends with "BaseOperator").

    TODO: Enhance this function to work with nested inheritance trees through dynamic imports.

    :param class_node: The class node to check.
    :return: True if the class definition is of an operator, False otherwise.
    """
    for base in class_node.bases:
        if isinstance(base, ast.Name) and base.id.endswith(BASE_OPERATOR_CLASS_NAME):
            return True
    return False
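A quick sketch of the check on a parsed, made-up class (``BASE_OPERATOR_CLASS_NAME`` is assumed here to be "BaseOperator"):

import ast

source = "class GCSToS3CopyOperator(GoogleCloudBaseOperator): ..."
class_node = ast.parse(source).body[0]
# The base name ends with "BaseOperator", so the class is treated as an operator.
print(_is_operator(class_node))  # True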
This method takes a class node as input and extracts the template fields from it. Template fields are identified by an assignment statement where the target is a variable named "template_fields" and the value is a tuple of constants. :param class_node: The class node representing the class for which template fields need to be extracted. :return: A list of template fields extracted from the class node.
def _extract_template_fields(class_node: ast.ClassDef) -> list[str]:
    """
    This method takes a class node as input and extracts the template fields from it.

    Template fields are identified by an assignment statement where the target is a variable named
    "template_fields" and the value is a tuple of constants.

    :param class_node: The class node representing the class for which template fields need to be extracted.
    :return: A list of template fields extracted from the class node.
    """
    for class_item in class_node.body:
        if isinstance(class_item, ast.Assign):
            for target in class_item.targets:
                if (
                    isinstance(target, ast.Name)
                    and target.id == "template_fields"
                    and isinstance(class_item.value, ast.Tuple)
                ):
                    return [elt.value for elt in class_item.value.elts if isinstance(elt, ast.Constant)]
        elif isinstance(class_item, ast.AnnAssign):
            if (
                isinstance(class_item.target, ast.Name)
                and class_item.target.id == "template_fields"
                and isinstance(class_item.value, ast.Tuple)
            ):
                return [elt.value for elt in class_item.value.elts if isinstance(elt, ast.Constant)]
    return []
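A small example of extracting the fields from a made-up class (the annotated assignment takes the AnnAssign branch above):

import ast

source = """
class MyOperator(BaseOperator):
    template_fields: Sequence[str] = ("bucket", "object_name")

    def __init__(self, bucket, object_name, **kwargs):
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object_name = object_name
"""
class_node = ast.parse(source).body[0]
print(_extract_template_fields(class_node))  # ['bucket', 'object_name']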
This method checks if template fields are correctly assigned in the call to the parent class's constructor. It handles both the detection of missing assignments and invalid assignments. It assumes that if the call is valid, the parent class will correctly assign the template field. TODO: Enhance this function to work with nested inheritance trees through dynamic imports. :param missing_assignments: List[str] - List of template fields that have not been assigned a value. :param ctor_stmt: ast.Expr - AST node representing the constructor statement. :param invalid_assignments: List[str] - List of template fields that have been assigned incorrectly. :param template_fields: List[str] - List of template fields to be assigned. :return: List[str] - List of template fields that are still missing assignments.
def _handle_parent_constructor_kwargs( template_fields: list[str], ctor_stmt: ast.stmt, missing_assignments: list[str], invalid_assignments: list[str], ) -> list[str]: """ This method checks if template fields are correctly assigned in a call to class parent's constructor call. It handles both the detection of missing assignments and invalid assignments. It assumes that if the call is valid - the parent class will correctly assign the template field. TODO: Enhance this function to work with nested inheritance trees through dynamic imports. :param missing_assignments: List[str] - List of template fields that have not been assigned a value. :param ctor_stmt: ast.Expr - AST node representing the constructor statement. :param invalid_assignments: List[str] - List of template fields that have been assigned incorrectly. :param template_fields: List[str] - List of template fields to be assigned. :return: List[str] - List of template fields that are still missing assignments. """ if isinstance(ctor_stmt, ast.Expr): if ( isinstance(ctor_stmt.value, ast.Call) and isinstance(ctor_stmt.value.func, ast.Attribute) and isinstance(ctor_stmt.value.func.value, ast.Call) and isinstance(ctor_stmt.value.func.value.func, ast.Name) and ctor_stmt.value.func.value.func.id == "super" ): for arg in ctor_stmt.value.keywords: if arg.arg is not None and arg.arg in template_fields: if not isinstance(arg.value, ast.Name) or arg.arg != arg.value.id: invalid_assignments.append(arg.arg) assigned_targets = [arg.arg for arg in ctor_stmt.value.keywords if arg.arg is not None] return list(set(missing_assignments) - set(assigned_targets)) return missing_assignments
This method handles a single constructor statement by doing the following actions: 1. Removing assigned fields of template_fields from missing_assignments. 2. Detecting invalid assignments of template fields and adding them to invalid_assignments. :param template_fields: Tuple of template fields. :param ctor_stmt: Constructor statement (for example, self.field_name = param_name) :param missing_assignments: List of missing assignments. :param invalid_assignments: List of invalid assignments. :return: List of missing assignments after handling the assigned targets.
def _handle_constructor_statement( template_fields: list[str], ctor_stmt: ast.stmt, missing_assignments: list[str], invalid_assignments: list[str], ) -> list[str]: """ This method handles a single constructor statement by doing the following actions: 1. Removing assigned fields of template_fields from missing_assignments. 2. Detecting invalid assignments of template fields and adding them to invalid_assignments. :param template_fields: Tuple of template fields. :param ctor_stmt: Constructor statement (for example, self.field_name = param_name) :param missing_assignments: List of missing assignments. :param invalid_assignments: List of invalid assignments. :return: List of missing assignments after handling the assigned targets. """ assigned_template_fields: list[str] = [] if isinstance(ctor_stmt, ast.Assign): if isinstance(ctor_stmt.targets[0], ast.Attribute): for target in ctor_stmt.targets: if isinstance(target, ast.Attribute) and target.attr in template_fields: if isinstance(ctor_stmt.value, ast.BoolOp) and isinstance(ctor_stmt.value.op, ast.Or): _handle_assigned_field( assigned_template_fields, invalid_assignments, target, ctor_stmt.value.values[0] ) else: _handle_assigned_field( assigned_template_fields, invalid_assignments, target, ctor_stmt.value ) elif isinstance(ctor_stmt.targets[0], ast.Tuple) and isinstance(ctor_stmt.value, ast.Tuple): for target, value in zip(ctor_stmt.targets[0].elts, ctor_stmt.value.elts): if isinstance(target, ast.Attribute): _handle_assigned_field(assigned_template_fields, invalid_assignments, target, value) elif isinstance(ctor_stmt, ast.AnnAssign): if isinstance(ctor_stmt.target, ast.Attribute) and ctor_stmt.target.attr in template_fields: _handle_assigned_field( assigned_template_fields, invalid_assignments, ctor_stmt.target, ctor_stmt.value ) return list(set(missing_assignments) - set(assigned_template_fields))
Handle an assigned field by its value. :param assigned_template_fields: A list to store the valid assigned fields. :param invalid_assignments: A list to store the invalid assignments. :param target: The target field. :param value: The value of the field.
def _handle_assigned_field(
    assigned_template_fields: list[str], invalid_assignments: list[str], target: ast.Attribute, value: Any
) -> None:
    """
    Handle an assigned field by its value.

    :param assigned_template_fields: A list to store the valid assigned fields.
    :param invalid_assignments: A list to store the invalid assignments.
    :param target: The target field.
    :param value: The value of the field.
    """
    if not isinstance(value, ast.Name) or target.attr != value.id:
        invalid_assignments.append(target.attr)
    else:
        assigned_template_fields.append(target.attr)
This method checks a class's constructor for missing or invalid assignments of template fields. When there isn't a constructor - it assumes that the template fields are defined in the parent's constructor correctly. TODO: Enhance this function to work with nested inheritance trees through dynamic imports. :param class_node: the AST node representing the class definition :param template_fields: a tuple of template fields :return: the number of invalid template fields found
def _check_constructor_template_fields(class_node: ast.ClassDef, template_fields: list[str]) -> int: """ This method checks a class's constructor for missing or invalid assignments of template fields. When there isn't a constructor - it assumes that the template fields are defined in the parent's constructor correctly. TODO: Enhance this function to work with nested inheritance trees through dynamic imports. :param class_node: the AST node representing the class definition :param template_fields: a tuple of template fields :return: the number of invalid template fields found """ count = 0 class_name = class_node.name missing_assignments = template_fields.copy() invalid_assignments: list[str] = [] init_flag: bool = False for class_item in class_node.body: if isinstance(class_item, ast.FunctionDef) and class_item.name == "__init__": init_flag = True for ctor_stmt in class_item.body: missing_assignments = _handle_parent_constructor_kwargs( template_fields, ctor_stmt, missing_assignments, invalid_assignments ) missing_assignments = _handle_constructor_statement( template_fields, ctor_stmt, missing_assignments, invalid_assignments ) if init_flag and missing_assignments: count += len(missing_assignments) console.print( f"{class_name}'s constructor lacks direct assignments for " f"instance members corresponding to the following template fields " f"(i.e., self.field_name = field_name or super.__init__(field_name=field_name, ...) ):" ) console.print(f"[red]{missing_assignments}[/red]") if invalid_assignments: count += len(invalid_assignments) console.print( f"{class_name}'s constructor contains invalid assignments to the following instance " f"members that should be corresponding to template fields " f"(i.e., self.field_name = field_name):" ) console.print(f"[red]{[f'self.{entry}' for entry in invalid_assignments]}[/red]") return count
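A hedged end-to-end example with a made-up class whose constructor forgets one assignment (``console`` is the script's own Rich console, so the call also prints a diagnostic):

import ast

source = """
class MyOperator(BaseOperator):
    template_fields = ("bucket", "object_name")

    def __init__(self, bucket, object_name, **kwargs):
        super().__init__(**kwargs)
        self.bucket = bucket  # object_name is never assigned
"""
class_node = ast.parse(source).body[0]
errors = _check_constructor_template_fields(class_node, ["bucket", "object_name"])
print(errors)  # 1 - "object_name" is reported as a missing assignment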
Check missing or invalid template fields in constructors of providers' operators. :return: The total number of errors found.
def main():
    """
    Check missing or invalid template fields in constructors of providers' operators.

    :return: The total number of errors found.
    """
    err = 0
    for path in sys.argv[1:]:
        console.print(f"[yellow]{path}[/yellow]")
        tree = ast.parse(open(path).read())
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef) and _is_operator(class_node=node):
                template_fields = _extract_template_fields(node) or []
                err += _check_constructor_template_fields(node, template_fields)
    return err
Helper for sort and group by.
def sorted_groupby(it, grouping_key: Callable):
    """Helper for sort and group by."""
    for group, grouped_data in groupby(sorted(it, key=grouping_key), key=grouping_key):
        yield group, list(grouped_data)
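A small usage sketch (the rows are made up; ``groupby`` is assumed to come from ``itertools`` at the top of the script):

rows = [("google", 3), ("amazon", 1), ("google", 7), ("amazon", 2)]
for provider, items in sorted_groupby(rows, grouping_key=lambda row: row[0]):
    print(provider, items)
# amazon [('amazon', 1), ('amazon', 2)]
# google [('google', 3), ('google', 7)]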
For Airflow we determine constraints in this order of preference: * AIRFLOW_SKIP_CONSTRAINTS=true - no constraints * AIRFLOW_CONSTRAINTS_LOCATION - constraints from this location (url) * AIRFLOW_CONSTRAINTS_REFERENCE + constraints mode if specified * if we know airflow version "constraints-VERSION" + constraints mode * DEFAULT_CONSTRAINT_BRANCH + constraints mode - as fallback * constraints-main + constraints mode - as fallback
def get_airflow_constraints_location(
    airflow_constraints_mode: str,
    airflow_constraints_location: str | None,
    airflow_constraints_reference: str | None,
    default_constraints_branch: str,
    airflow_package_version: str | None,
    github_repository: str,
    python_version: str,
    airflow_skip_constraints: bool,
) -> str | None:
    """For Airflow we determine constraints in this order of preference:

    * AIRFLOW_SKIP_CONSTRAINTS=true - no constraints
    * AIRFLOW_CONSTRAINTS_LOCATION - constraints from this location (url)
    * AIRFLOW_CONSTRAINTS_REFERENCE + constraints mode if specified
    * if we know airflow version "constraints-VERSION" + constraints mode
    * DEFAULT_CONSTRAINT_BRANCH + constraints mode - as fallback
    * constraints-main + constraints mode - as fallback
    """
    if airflow_skip_constraints:
        return None
    if airflow_constraints_location:
        console.print(f"[info]Using constraints from location: {airflow_constraints_location}")
        return airflow_constraints_location
    if airflow_constraints_reference:
        console.print(
            f"[info]Building constraints location from "
            f"constraints reference: {airflow_constraints_reference}"
        )
    elif airflow_package_version:
        if re.match(r"[0-9]+\.[0-9]+\.[0-9]+[0-9a-z.]*|main|v[0-9]_.*", airflow_package_version):
            airflow_constraints_reference = f"constraints-{airflow_package_version}"
            console.print(
                f"[info]Determined constraints reference from airflow package version "
                f"{airflow_package_version} as: {airflow_constraints_reference}"
            )
        else:
            airflow_constraints_reference = default_constraints_branch
            console.print(f"[info]Falling back to: {default_constraints_branch}")
    return calculate_constraints_location(
        constraints_mode=airflow_constraints_mode,
        constraints_reference=airflow_constraints_reference,
        github_repository=github_repository,
        python_version=python_version,
        providers=False,
    )
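For orientation, a sketch of the URL shape that ``calculate_constraints_location`` is expected to produce for the default constraints mode; the helper itself is not shown in this section, so treat the exact format as an assumption:

github_repository = "apache/airflow"
constraints_reference = "constraints-2.9.1"
python_version = "3.8"
print(
    f"https://raw.githubusercontent.com/{github_repository}/"
    f"{constraints_reference}/constraints-{python_version}.txt"
)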
For providers we determine constraints in this order of preference: * PROVIDERS_SKIP_CONSTRAINTS=true - no constraints * PROVIDERS_CONSTRAINTS_LOCATION - constraints from this location (url) * PROVIDERS_CONSTRAINTS_REFERENCE + constraints mode if specified * DEFAULT_CONSTRAINT_BRANCH + constraints mode * constraints-main + constraints mode - as fallback
def get_providers_constraints_location( default_constraints_branch: str, github_repository: str, providers_constraints_location: str | None, providers_constraints_mode: str, providers_constraints_reference: str | None, providers_skip_constraints: bool, python_version: str, ) -> str | None: """For providers we determine constraints in this order of preference: * PROVIDERS_SKIP_CONSTRAINTS=true - no constraints * PROVIDERS_CONSTRAINTS_LOCATION - constraints from this location (url) * PROVIDERS_CONSTRAINTS_REFERENCE + constraints mode if specified * DEFAULT_CONSTRAINT_BRANCH + constraints mode * constraints-main + constraints mode - as fallback """ if providers_skip_constraints: return None if providers_constraints_location: console.print(f"[info]Using constraints from location: {providers_constraints_location}") return providers_constraints_location if not providers_constraints_reference: providers_constraints_reference = default_constraints_branch return calculate_constraints_location( constraints_mode=providers_constraints_mode, constraints_reference=providers_constraints_reference, python_version=python_version, github_repository=github_repository, providers=True, )
Diffs constraints files and prints the diff to the console.
def diff_constraints(config_params: ConfigParams) -> None: """ Diffs constraints files and prints the diff to the console. """ console.print("[bright_blue]Diffing constraints files") result = run_command( [ "diff", "--ignore-matching-lines=#", "--color=always", config_params.latest_constraints_file.as_posix(), config_params.current_constraints_file.as_posix(), ], # always shows output directly in CI without folded group github_actions=False, check=False, ) if result.returncode == 0: console.print("[green]No changes in constraints files. exiting") config_params.constraints_diff_file.unlink(missing_ok=True) return result = run_command( [ "diff", "--ignore-matching-lines=#", "--color=never", config_params.latest_constraints_file.as_posix(), config_params.current_constraints_file.as_posix(), ], github_actions=config_params.github_actions, check=False, text=True, capture_output=True, ) with config_params.constraints_diff_file.open("w") as diff_file: diff_file.write( f"Dependencies {config_params.airflow_constraints_mode} updated " f"for Python {config_params.python}\n\n" ) diff_file.write("```diff\n") diff_file.write(result.stdout) diff_file.write("```\n") console.print(f"[green]Diff generated to file: {config_params.constraints_diff_file}")
Generates constraints with provider dependencies used from current sources. This might be different from the constraints generated from the latest released version of the providers in PyPI. Those constraints are used in CI builds when we install providers built using current sources and in Breeze CI image builds.
def generate_constraints_source_providers(config_params: ConfigParams) -> None: """ Generates constraints with provider dependencies used from current sources. This might be different from the constraints generated from the latest released version of the providers in PyPI. Those constraints are used in CI builds when we install providers built using current sources and in Breeze CI image builds. """ with config_params.current_constraints_file.open("w") as constraints_file: constraints_file.write(SOURCE_PROVIDERS_CONSTRAINTS_PREFIX) freeze_packages_to_file(config_params, constraints_file) download_latest_constraint_file(config_params) diff_constraints(config_params)
Generates constraints with providers installed from PyPI. This is the default constraints file used in production/release builds when we install providers from PyPI and, when tagged, those constraints are used by our users to install Airflow in a reproducible way.
def generate_constraints_pypi_providers(config_params: ConfigParams) -> None: """ Generates constraints with provider installed from PyPI. This is the default constraints file used in production/release builds when we install providers from PyPI and when tagged, those providers are used by our users to install Airflow in reproducible way. :return: """ dist_dir = Path("/dist") all_provider_packages = get_all_active_provider_packages(python_version=config_params.python) chicken_egg_prefixes = [] packages_to_install = [] console.print("[bright_blue]Installing Airflow with PyPI providers with eager upgrade") if config_params.chicken_egg_providers: for chicken_egg_provider in config_params.chicken_egg_providers.split(" "): chicken_egg_prefixes.append(f"apache-airflow-providers-{chicken_egg_provider.replace('.','-')}") console.print( f"[bright_blue]Checking if {chicken_egg_prefixes} are available in local dist folder " f"as chicken egg providers)" ) for provider_package in all_provider_packages: if config_params.chicken_egg_providers and provider_package.startswith(tuple(chicken_egg_prefixes)): glob_pattern = f"{provider_package.replace('-','_')}-*.whl" console.print( f"[bright_blue]Checking if {provider_package} is available in local dist folder " f"with {glob_pattern} pattern" ) files = dist_dir.glob(glob_pattern) for file in files: console.print( f"[yellow]Installing {file.name} from local dist folder as it is " f"a chicken egg provider" ) packages_to_install.append(f"{provider_package} @ file://{file.as_posix()}") break else: console.print( f"[yellow]Skipping {provider_package} as it is not found in dist folder to install." ) # Skip checking if chicken egg provider is available in PyPI - it does not have to be there continue console.print(f"[bright_blue]Checking if {provider_package} is available in PyPI: ... ", end="") r = requests.head(f"https://pypi.org/pypi/{provider_package}/json", timeout=60) if r.status_code == 200: console.print("[green]OK") packages_to_install.append(provider_package) else: console.print("[yellow]NOK. Skipping.") run_command( cmd=[ *config_params.get_install_command, "-e", ".[all-core]", *packages_to_install, *config_params.eager_upgrade_additional_requirements_list, *config_params.get_resolution_highest_args, ], github_actions=config_params.github_actions, check=True, ) console.print("[success]Installed airflow with PyPI providers with eager upgrade.") with config_params.current_constraints_file.open("w") as constraints_file: constraints_file.write(PYPI_PROVIDERS_CONSTRAINTS_PREFIX) freeze_packages_to_file(config_params, constraints_file) download_latest_constraint_file(config_params) diff_constraints(config_params)
Generates constraints without any provider dependencies. This is used mostly to generate SBOM files - where we generate list of dependencies for Airflow without any provider installed.
def generate_constraints_no_providers(config_params: ConfigParams) -> None: """ Generates constraints without any provider dependencies. This is used mostly to generate SBOM files - where we generate list of dependencies for Airflow without any provider installed. """ uninstall_all_packages(config_params) console.print( "[bright_blue]Installing airflow with `all-core` extras only with eager upgrade in " "installable mode." ) install_local_airflow_with_eager_upgrade(config_params) console.print("[success]Installed airflow with [all-core] extras only with eager upgrade.") with config_params.current_constraints_file.open("w") as constraints_file: constraints_file.write(NO_PROVIDERS_CONSTRAINTS_PREFIX) freeze_packages_to_file(config_params, constraints_file) download_latest_constraint_file(config_params) diff_constraints(config_params)
Integration names must be globally unique.
def check_integration_duplicates(yaml_files: dict[str, dict]) -> tuple[int, int]:
    """Integration names must be globally unique."""
    num_errors = 0
    all_integrations = get_all_integration_names(yaml_files)
    num_integrations = len(all_integrations)
    duplicates = [(k, v) for (k, v) in Counter(all_integrations).items() if v > 1]
    if duplicates:
        console.print(
            "Duplicate integration names found. Integration names must be globally unique. "
            "Please delete duplicates."
        )
        errors.append(tabulate(duplicates, headers=["Integration name", "Number of occurrences"]))
        num_errors += 1
    return num_integrations, num_errors
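The duplicate detection itself is a plain ``Counter`` pass; a minimal illustration with made-up integration names:

from collections import Counter

all_integrations = ["kafka", "kafka", "trino", "mysql"]
duplicates = [(k, v) for (k, v) in Counter(all_integrations).items() if v > 1]
print(duplicates)  # [('kafka', 2)]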
Convert bytes into gigabytes. 1 gigabyte = 1024*1024*1024 = 1073741824 bytes
def get_size(bytes):
    """
    Convert bytes into gigabytes.

    1 gigabyte = 1024*1024*1024 = 1073741824 bytes
    """
    factor = 1024**3
    value_gb = bytes / factor
    return value_gb
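A quick sanity check of the conversion (illustrative only):

print(get_size(1073741824))          # 1.0
print(round(get_size(8 * 1024**3)))  # 8 - the resource check below rounds to whole gigabytes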
Use psutil to check the currently available memory, CPU and disk resources.
def resoure_check():
    """
    Use psutil to check the currently available memory, CPU and disk resources.
    """
    MINIMUM_ALLOWED_MEMORY = 4
    MINIMUM_ALLOWED_CPUS = 2
    MINIMUM_ALLOWED_DISK = 20
    print("\nChecking resources.\n")
    # Memory currently available
    svmem = psutil.virtual_memory()
    mem_available = round(get_size(svmem.available))
    # CPUs currently available
    cpus_available = psutil.cpu_count(logical=True)
    # Disk currently available
    partition_usage = psutil.disk_usage("/")
    disk_available = round(get_size(partition_usage.free))
    resources: dict[str, Resource] = {
        "Memory": Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY),
        "Cpus": Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS),
        "Disk": Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK),
    }
    return resources
Imports all classes in providers packages. This method loads and imports all the classes found in providers, so that we can find all the subclasses of operators/sensors etc. :param walkable_paths_and_prefixes: dict of paths with accompanying prefixes to look for the provider packages in :param prefix: prefix to add :param provider_ids: provider ids that should be loaded :param print_imports: if imported classes should also be printed in output :param print_skips: if skipped classes should also be printed in output :return: tuple of the list of all imported classes and the list of classes with potential circular imports
def import_all_classes( walkable_paths_and_prefixes: dict[str, str], prefix: str, provider_ids: list[str] | None = None, print_imports: bool = False, print_skips: bool = False, ) -> tuple[list[str], list[str]]: """Imports all classes in providers packages. This method loads and imports all the classes found in providers, so that we can find all the subclasses of operators/sensors etc. :param walkable_paths_and_prefixes: dict of paths with accompanying prefixes to look the provider packages in :param prefix: prefix to add :param provider_ids - provider ids that should be loaded. :param print_imports - if imported class should also be printed in output :param print_skips - if skipped classes should also be printed in output :return: tuple of list of all imported classes and """ console.print() console.print(f"Walking all package with prefixes in {walkable_paths_and_prefixes}") console.print() imported_classes = [] classes_with_potential_circular_import = [] tracebacks: list[tuple[str, str]] = [] printed_packages: set[str] = set() def mk_prefix(provider_id): return f"{prefix}{provider_id}" if provider_ids: provider_prefixes = tuple(mk_prefix(provider_id) for provider_id in provider_ids) else: provider_prefixes = (prefix,) def onerror(_): nonlocal tracebacks exception_string = traceback.format_exc() for provider_prefix in provider_prefixes: if provider_prefix in exception_string: start_index = exception_string.find(provider_prefix) end_index = exception_string.find("\n", start_index + len(provider_prefix)) package = exception_string[start_index:end_index] tracebacks.append((package, exception_string)) break for path, prefix in walkable_paths_and_prefixes.items(): for modinfo in pkgutil.walk_packages(path=[path], prefix=prefix, onerror=onerror): if not modinfo.name.startswith(provider_prefixes): if print_skips: console.print(f"Skipping module: {modinfo.name}") continue if print_imports: package_to_print = modinfo.name.rpartition(".")[0] if package_to_print not in printed_packages: printed_packages.add(package_to_print) console.print(f"Importing package: {package_to_print}") try: with warnings.catch_warnings(record=True): warnings.filterwarnings("always", category=DeprecationWarning) _module = importlib.import_module(modinfo.name) for attribute_name in dir(_module): class_name = modinfo.name + "." + attribute_name attribute = getattr(_module, attribute_name) if isclass(attribute): imported_classes.append(class_name) if isclass(attribute) and ( issubclass(attribute, logging.Handler) or issubclass(attribute, BaseSecretsBackend) ): classes_with_potential_circular_import.append(class_name) except AirflowOptionalProviderFeatureException: # We ignore optional features ... 
except Exception as e: # skip the check as we are temporary vendoring in the google ads client with wrong package if "No module named 'google.ads.googleads.v12'" not in str(e): exception_str = traceback.format_exc() tracebacks.append((modinfo.name, exception_str)) if tracebacks: if IS_AIRFLOW_VERSION_PROVIDED: console.print( f""" [red]ERROR: There were some import errors[/] [yellow]Detected that this job is about installing providers in {USE_AIRFLOW_VERSION}[/], [yellow]most likely you are using features that are not available in Airflow {USE_AIRFLOW_VERSION}[/] [yellow]and you must implement them in backwards-compatible way![/] """, ) console.print("[red]----------------------------------------[/]") for package, trace in tracebacks: console.print(f"Exception when importing: {package}\n\n") console.print(trace) console.print("[red]----------------------------------------[/]") sys.exit(1) else: return imported_classes, classes_with_potential_circular_import
Is the class imported from the same module in which it is defined? :param the_class: the class object itself :param imported_name: name under which the class was imported :return: true if the class is imported from the module where it is defined
def is_imported_from_same_module(the_class: type, imported_name: str) -> bool:
    """Is the class imported from the same module in which it is defined?

    :param the_class: the class object itself
    :param imported_name: name under which the class was imported
    :return: true if the class is imported from the module where it is defined
    """
    return imported_name.rpartition(":")[0] == the_class.__module__
Is the class defined in an example_dags module? :param imported_name: name under which the class is imported :return: true if it comes from an example_dags module
def is_example_dag(imported_name: str) -> bool:
    """Is the class defined in an example_dags module?

    :param imported_name: name under which the class is imported
    :return: true if it comes from an example_dags module
    """
    return ".example_dags." in imported_name
Returns true if the class is from the package expected. :param the_class: the class object :param expected_package: package expected for the class
def is_from_the_expected_base_package(the_class: type, expected_package: str) -> bool:
    """Returns true if the class is from the package expected.

    :param the_class: the class object
    :param expected_package: package expected for the class
    """
    return the_class.__module__.startswith(expected_package)
Returns true if the class inherits (directly or indirectly) from the class specified. :param the_class: The class to check :param expected_ancestor: expected class to inherit from :return: true if the class inherits from the expected class
def inherits_from(the_class: type, expected_ancestor: type | None = None) -> bool:
    """Returns true if the class inherits (directly or indirectly) from the class specified.

    :param the_class: The class to check
    :param expected_ancestor: expected class to inherit from
    :return: true if the class inherits from the expected class
    """
    if expected_ancestor is None:
        return False
    import inspect

    mro = inspect.getmro(the_class)
    return the_class is not expected_ancestor and expected_ancestor in mro
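A short illustration of the MRO-based check (toy classes, not Airflow ones):

class Base: ...
class Middle(Base): ...
class Leaf(Middle): ...

print(inherits_from(Leaf, Base))  # True - indirect ancestor found via the MRO
print(inherits_from(Base, Base))  # False - a class does not "inherit from" itself
print(inherits_from(Leaf, None))  # False - no ancestor given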
Returns true if the object passed is a class. :param the_class: the object to check :return: true if it is a class
def is_class(the_class: type) -> bool:
    """Returns true if the object passed is a class.

    :param the_class: the object to check
    :return: true if it is a class
    """
    import inspect

    return inspect.isclass(the_class)
In case expected_pattern is set, it checks if the package name matches the pattern. :param the_class: imported class :param expected_pattern: the pattern that should match the package :return: true if the expected_pattern is None or the pattern matches the package
def package_name_matches(the_class: type, expected_pattern: str | None = None) -> bool:
    """In case expected_pattern is set, it checks if the package name matches the pattern.

    :param the_class: imported class
    :param expected_pattern: the pattern that should match the package
    :return: true if the expected_pattern is None or the pattern matches the package
    """
    return expected_pattern is None or re.match(expected_pattern, the_class.__module__) is not None
Converts new entities to a Markdown table. :param entity_type: entity type to convert to markup :param entities: list of entities :param full_package_name: name of the provider package :return: table of new classes
def convert_classes_to_table(entity_type: EntityType, entities: list[str], full_package_name: str) -> str:
    """Converts new entities to a Markdown table.

    :param entity_type: entity type to convert to markup
    :param entities: list of entities
    :param full_package_name: name of the provider package
    :return: table of new classes
    """
    from tabulate import tabulate

    headers = [f"New Airflow 2.0 {entity_type.value.lower()}: `{full_package_name}` package"]
    table = [(get_class_code_link(full_package_name, class_name, "main"),) for class_name in entities]
    return tabulate(table, headers=headers, tablefmt="pipe")
Get details about entities. :param entity_type: type of entity (Operators, Hooks etc.) :param entities: set of entities found :param wrong_entities: wrong entities found for that type :param full_package_name: full package name
def get_details_about_classes( entity_type: EntityType, entities: set[str], wrong_entities: list[tuple[type, str]], full_package_name: str, ) -> EntityTypeSummary: """Get details about entities. :param entity_type: type of entity (Operators, Hooks etc.) :param entities: set of entities found :param wrong_entities: wrong entities found for that type :param full_package_name: full package name """ all_entities = sorted(entities) TOTALS[entity_type] += len(all_entities) return EntityTypeSummary( entities=all_entities, new_entities_table=convert_classes_to_table( entity_type=entity_type, entities=all_entities, full_package_name=full_package_name, ), wrong_entities=wrong_entities, )
Strips base package name from the class (if it starts with the package name).
def strip_package_from_class(base_package: str, class_name: str) -> str:
    """Strips base package name from the class (if it starts with the package name)."""
    if class_name.startswith(base_package):
        return class_name[len(base_package) + 1 :]
    else:
        return class_name
Converts the class name to a URL at which the class can be reached. :param base_url: base URL to use :param class_name: name of the class :return: URL to the class
def convert_class_name_to_url(base_url: str, class_name) -> str:
    """Converts the class name to a URL at which the class can be reached.

    :param base_url: base URL to use
    :param class_name: name of the class
    :return: URL to the class
    """
    return base_url + class_name.rpartition(".")[0].replace(".", "/") + ".py"
Provides a Markdown link for the class passed as parameter. :param base_package: base package to strip from most names :param class_name: name of the class :param git_tag: tag to use for the URL link :return: Markdown link to the class
def get_class_code_link(base_package: str, class_name: str, git_tag: str) -> str:
    """Provides a Markdown link for the class passed as parameter.

    :param base_package: base package to strip from most names
    :param class_name: name of the class
    :param git_tag: tag to use for the URL link
    :return: Markdown link to the class
    """
    url_prefix = f"https://github.com/apache/airflow/blob/{git_tag}/"
    return (
        f"[{strip_package_from_class(base_package, class_name)}]"
        f"({convert_class_name_to_url(url_prefix, class_name)})"
    )
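A usage sketch with a real-looking class path (the link target is illustrative, not verified):

link = get_class_code_link(
    "airflow.providers.google",
    "airflow.providers.google.cloud.operators.gcs.GCSCreateBucketOperator",
    "main",
)
print(link)
# [cloud.operators.gcs.GCSCreateBucketOperator](https://github.com/apache/airflow/blob/main/airflow/providers/google/cloud/operators/gcs.py)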
Prints wrong entities of a given entity type if there are any. :param entity_type: type of the class to print :param wrong_classes: list of wrong entities
def print_wrong_naming(entity_type: EntityType, wrong_classes: list[tuple[type, str]]):
    """Prints wrong entities of a given entity type if there are any.

    :param entity_type: type of the class to print
    :param wrong_classes: list of wrong entities
    """
    if wrong_classes:
        console.print(f"\n[red]There are wrongly named entities of type {entity_type}:[/]\n")
        for wrong_entity_type, message in wrong_classes:
            console.print(f"{wrong_entity_type}: {message}")
Returns set of entities containing all subclasses in package specified. :param imported_classes: entities imported from providers :param base_package: base package name where to start looking for the entities :param sub_package_pattern_match: this string is expected to appear in the sub-package name :param ancestor_match: type of the object the method looks for :param expected_class_name_pattern: regexp of class name pattern to expect :param unexpected_class_name_patterns: set of regexps of class name patterns that are not expected :param exclude_class_type: exclude class of this type (Sensors are also Operators, so they should be excluded from the list) :param false_positive_class_names: set of class names that are wrongly recognised as badly named
def find_all_entities( imported_classes: list[str], base_package: str, ancestor_match: type, sub_package_pattern_match: str, expected_class_name_pattern: str, unexpected_class_name_patterns: set[str], exclude_class_type: type | None = None, false_positive_class_names: set[str] | None = None, ) -> VerifiedEntities: """Returns set of entities containing all subclasses in package specified. :param imported_classes: entities imported from providers :param base_package: base package name where to start looking for the entities :param sub_package_pattern_match: this string is expected to appear in the sub-package name :param ancestor_match: type of the object the method looks for :param expected_class_name_pattern: regexp of class name pattern to expect :param unexpected_class_name_patterns: set of regexp of class name pattern that are not expected :param exclude_class_type: exclude class of this type (Sensor are also Operators, so they should be excluded from the list) :param false_positive_class_names: set of class names that are wrongly recognised as badly named """ found_entities: set[str] = set() wrong_entities: list[tuple[type, str]] = [] for imported_name in imported_classes: module, class_name = imported_name.rsplit(".", maxsplit=1) the_class = getattr(importlib.import_module(module), class_name) if ( is_class(the_class=the_class) and not is_example_dag(imported_name=imported_name) and is_from_the_expected_base_package(the_class=the_class, expected_package=base_package) and is_imported_from_same_module(the_class=the_class, imported_name=imported_name) and inherits_from(the_class=the_class, expected_ancestor=ancestor_match) and not inherits_from(the_class=the_class, expected_ancestor=exclude_class_type) and package_name_matches(the_class=the_class, expected_pattern=sub_package_pattern_match) ): if not false_positive_class_names or class_name not in false_positive_class_names: if not re.match(expected_class_name_pattern, class_name): wrong_entities.append( ( the_class, f"The class name {class_name} is wrong. " f"It should match {expected_class_name_pattern}", ) ) continue if unexpected_class_name_patterns: for unexpected_class_name_pattern in unexpected_class_name_patterns: if re.match(unexpected_class_name_pattern, class_name): wrong_entities.append( ( the_class, f"The class name {class_name} is wrong. " f"It should not match {unexpected_class_name_pattern}", ) ) found_entities.add(imported_name) return VerifiedEntities(all_entities=found_entities, wrong_entities=wrong_entities)
Gets summary of the package in the form of a dictionary containing all types of entities. :param full_package_name: full package name :param imported_classes: entities imported from providers :return: dictionary of objects usable as context for JINJA2 templates, or None if there are some errors
def get_package_class_summary( full_package_name: str, imported_classes: list[str] ) -> dict[EntityType, EntityTypeSummary]: """Gets summary of the package in the form of dictionary containing all types of entities. :param full_package_name: full package name :param imported_classes: entities imported_from providers :return: dictionary of objects usable as context for JINJA2 templates, or None if there are some errors """ from airflow.hooks.base import BaseHook from airflow.models.baseoperator import BaseOperator from airflow.secrets import BaseSecretsBackend from airflow.sensors.base import BaseSensorOperator from airflow.triggers.base import BaseTrigger # Remove this conditional check after providers are 2.6+ compatible try: from airflow.notifications.basenotifier import BaseNotifier has_notifier = True except ImportError: has_notifier = False all_verified_entities: dict[EntityType, VerifiedEntities] = { EntityType.Operators: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.operators\..*", ancestor_match=BaseOperator, expected_class_name_pattern=OPERATORS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN}, exclude_class_type=BaseSensorOperator, false_positive_class_names={ "ProduceToTopicOperator", "CloudVisionAddProductToProductSetOperator", "CloudDataTransferServiceGCSToGCSOperator", "CloudDataTransferServiceS3ToGCSOperator", "BigQueryCreateDataTransferOperator", "CloudTextToSpeechSynthesizeOperator", "CloudSpeechToTextRecognizeSpeechOperator", }, ), EntityType.Sensors: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.sensors\..*", ancestor_match=BaseSensorOperator, expected_class_name_pattern=SENSORS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, SENSORS_PATTERN}, ), EntityType.Hooks: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.hooks\..*", ancestor_match=BaseHook, expected_class_name_pattern=HOOKS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {HOOKS_PATTERN}, ), EntityType.Secrets: find_all_entities( imported_classes=imported_classes, sub_package_pattern_match=r".*\.secrets\..*", base_package=full_package_name, ancestor_match=BaseSecretsBackend, expected_class_name_pattern=SECRETS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {SECRETS_PATTERN}, ), EntityType.Transfers: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.transfers\..*", ancestor_match=BaseOperator, expected_class_name_pattern=TRANSFERS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, TRANSFERS_PATTERN}, ), EntityType.Trigger: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.triggers\..*", ancestor_match=BaseTrigger, expected_class_name_pattern=TRIGGER_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {TRIGGER_PATTERN}, ), } if has_notifier: all_verified_entities[EntityType.Notification] = find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.notifications\..*", ancestor_match=BaseNotifier, expected_class_name_pattern=NOTIFICATION_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {NOTIFICATION_PATTERN}, ) else: all_verified_entities[EntityType.Notification] = VerifiedEntities( all_entities=set(), 
wrong_entities=[] ) for entity in EntityType: print_wrong_naming(entity, all_verified_entities[entity].wrong_entities) entities_summary: dict[EntityType, EntityTypeSummary] = {} for entity_type in EntityType: entities_summary[entity_type] = get_details_about_classes( entity_type, all_verified_entities[entity_type].all_entities, all_verified_entities[entity_type].wrong_entities, full_package_name, ) return entities_summary
Check if the string passed is CamelCase (with capitalised acronyms allowed).

:param s: string to check
:return: True if the name is a valid CamelCase class name (optionally with a single leading underscore).
def is_camel_case_with_acronyms(s: str) -> bool:
    """Check if the string passed is CamelCase (with capitalised acronyms allowed).

    :param s: string to check
    :return: True if the name is a valid CamelCase class name (optionally with a single leading underscore).
    """
    if s and s[0] == "_":  # Leading underscores are fine.
        s = s[1:]
    if not s:
        return True
    return s[0].isupper() and not (s.islower() or s.isupper() or "_" in s)
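A few illustrative calls (not part of the original module) showing what the check accepts and rejects:

# Illustrative checks only - these asserts are not part of the source module.
assert is_camel_case_with_acronyms("BigQueryCreateEmptyTableOperator")  # CamelCase with an acronym
assert is_camel_case_with_acronyms("_PrivateHelperHook")  # a single leading underscore is allowed
assert not is_camel_case_with_acronyms("bigquery_hook")  # snake_case is rejected
assert not is_camel_case_with_acronyms("SQS")  # all-uppercase names are rejected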
Check if all entities in the dictionary are named properly.

It prints the badly named classes to the output and returns the naming status of the classes.

:param entity_summary: dictionary of class names to check, grouped by types.
:return: tuple of two ints: the total number of entities and the number of badly named entities
def check_if_classes_are_properly_named( entity_summary: dict[EntityType, EntityTypeSummary], ) -> tuple[int, int]: """Check if all entities in the dictionary are named properly. It prints names at the output and returns the status of class names. :param entity_summary: dictionary of class names to check, grouped by types. :return: Tuple of 2 ints = total number of entities and number of badly named entities """ total_class_number = 0 badly_named_class_number = 0 for entity_type, class_suffix in EXPECTED_SUFFIXES.items(): for class_full_name in entity_summary[entity_type].entities: _, class_name = class_full_name.rsplit(".", maxsplit=1) error_encountered = False if ( class_name.startswith("send_") and class_name.endswith("_notification") and entity_type == EntityType.Notification ): continue if not is_camel_case_with_acronyms(class_name): console.print( f"[red]The class {class_full_name} is wrongly named. The " f"class name should be CamelCaseWithACRONYMS optionally " f"with a single leading underscore[/]" ) error_encountered = True if not class_name.endswith(class_suffix): console.print( f"[red]The class {class_full_name} is wrongly named. It is one of the {entity_type.value}" f" so it should end with {class_suffix}[/]" ) error_encountered = True total_class_number += 1 if error_encountered: badly_named_class_number += 1 return total_class_number, badly_named_class_number
Verify naming of provider classes for single provider.
def verify_provider_classes_for_single_provider(imported_classes: list[str], provider_package_id: str): """Verify naming of provider classes for single provider.""" full_package_name = f"airflow.providers.{provider_package_id}" entity_summaries = get_package_class_summary(full_package_name, imported_classes) total, bad = check_if_classes_are_properly_named(entity_summaries) bad += sum(len(entity_summary.wrong_entities) for entity_summary in entity_summaries.values()) if bad != 0: console.print() console.print(f"[red]There are {bad} errors of {total} entities for {provider_package_id}[/]") console.print() return total, bad
Summarise the bad/good class names for providers.
def summarise_total_vs_bad(total: int, bad: int) -> bool:
    """Summarise the bad/good class names for providers."""
    if bad == 0:
        console.print()
        console.print(f"[green]OK: All {total} entities are properly named[/]")
        console.print()
        console.print("Totals:")
        console.print()
        for entity in EntityType:
            console.print(f"{entity.value}: {TOTALS[entity]}")
        console.print()
    else:
        console.print()
        # Only emit the GitHub Actions group marker when the CI variable is actually set.
        if os.environ.get("CI", "") != "":
            console.print("::endgroup::")
        console.print(
            f"[red]ERROR! There are in total: {bad} entities badly named out of {total} entities[/]"
        )
        console.print()
        console.print("[red]Please fix the problems listed above [/]")
        return False
    return True
Find namespace packages.

This needs to be done manually as ``walk_packages`` does not support namespaced packages
and PEP 420.

:param walkable_paths_and_prefixes: dictionary mapping walkable paths to their import prefixes (updated in place)
:param provider_path: filesystem path of the providers directory to scan
:param provider_prefix: import prefix corresponding to ``provider_path``
def add_all_namespaced_packages(
    walkable_paths_and_prefixes: dict[str, str], provider_path: str, provider_prefix: str
):
    """Find namespace packages.

    This needs to be done manually as ``walk_packages`` does not support namespaced packages
    and PEP 420.

    :param walkable_paths_and_prefixes: dictionary mapping walkable paths to their import prefixes (updated in place)
    :param provider_path: filesystem path of the providers directory to scan
    :param provider_prefix: import prefix corresponding to ``provider_path``
    """
    main_path = Path(provider_path).resolve()
    for candidate_path in main_path.rglob("*"):
        if candidate_path.name == "__pycache__":
            continue
        if candidate_path.is_dir() and not (candidate_path / "__init__.py").exists():
            subpackage = str(candidate_path.relative_to(main_path)).replace(os.sep, ".")
            walkable_paths_and_prefixes[str(candidate_path)] = provider_prefix + subpackage + "."
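A hypothetical sketch of how this helper is used; the directory layout, the provider name "acme" and the paths below are made up for illustration.

# Hypothetical sketch - paths and provider name are assumptions, adjust to your checkout.
# Given a PEP 420 namespace package at <providers-root>/acme/hooks (no __init__.py),
# the helper adds an extra walkable entry so that module walking can descend into it.
providers_root = "/opt/airflow/airflow/providers"
walkable_paths_and_prefixes: dict[str, str] = {providers_root: "airflow.providers."}
add_all_namespaced_packages(walkable_paths_and_prefixes, providers_root, "airflow.providers.")
# walkable_paths_and_prefixes would now also contain, for example:
#   "/opt/airflow/airflow/providers/acme/hooks" -> "airflow.providers.acme.hooks."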
Verifies all provider classes. :return: Tuple: list of all classes and list of all classes that have potential recursion side effects
def verify_provider_classes() -> tuple[list[str], list[str]]: """Verifies all provider classes. :return: Tuple: list of all classes and list of all classes that have potential recursion side effects """ provider_ids = get_all_providers() walkable_paths_and_prefixes: dict[str, str] = {} provider_prefix = "airflow.providers." for provider_path in get_providers_paths(): walkable_paths_and_prefixes[provider_path] = provider_prefix add_all_namespaced_packages(walkable_paths_and_prefixes, provider_path, provider_prefix) imported_classes, classes_with_potential_circular_import = import_all_classes( walkable_paths_and_prefixes=walkable_paths_and_prefixes, provider_ids=provider_ids, print_imports=True, prefix="airflow.providers.", ) total = 0 bad = 0 for provider_package_id in provider_ids: inc_total, inc_bad = verify_provider_classes_for_single_provider( imported_classes, provider_package_id ) total += inc_total bad += inc_bad if not summarise_total_vs_bad(total, bad): sys.exit(1) if not imported_classes: console.print("[red]Something is seriously wrong - no classes imported[/]") sys.exit(1) console.print() console.print("[green]SUCCESS: All provider packages are importable![/]\n") console.print(f"Imported {len(imported_classes)} classes.") console.print() return imported_classes, classes_with_potential_circular_import
Check if the user provided any extra packages to install. Defaults to the 'devel' extra.
def check_for_package_extras() -> str:
    """
    Check if the user provided any extra packages to install.

    Defaults to the 'devel' extra.
    """
    if len(sys.argv) > 1:
        if len(sys.argv) > 2:
            print('Provide extras as a single argument like: "devel,google,snowflake"')
            sys.exit(1)
        return sys.argv[1]
    return "devel"
Install the requirements for the current Python version. Return 0 on success; anything else is an error.
def pip_install_requirements() -> int:
    """
    Install the requirements for the current Python version.

    Return 0 on success; anything else is an error.
    """
    extras = check_for_package_extras()
    print(
        f"""
Installing requirements.

Airflow is installed with "{extras}" extra.

----------------------------------------------------------------------------------------

IMPORTANT NOTE ABOUT EXTRAS !!!

You can specify extras as a single comma-separated parameter to install. For example

* devel - to have all development dependencies required to test core.
* devel-* - to selectively install tools that we use to run scripts, tests, static checks etc.
* google,amazon,microsoft_azure - to install dependencies needed at runtime by specified providers
* devel-all-dbs - to have all development dependencies required for all DB providers
* devel-all - to have all development dependencies required for all providers

Note that "devel-all" installs all possible dependencies and we have > 600 of them,
which might not be possible to install cleanly on your host because of lack of
system packages. It's easier to install extras one-by-one as needed.

----------------------------------------------------------------------------------------

"""
    )
    version = get_python_version()
    constraint = (
        f"https://raw.githubusercontent.com/apache/airflow/constraints-main/"
        f"constraints-source-providers-{version}.txt"
    )
    pip_install_command = ["pip", "install", "-e", f".[{extras}]", "--constraint", constraint]
    quoted_command = " ".join([shlex.quote(parameter) for parameter in pip_install_command])
    print()
    print(f"Running command: \n {quoted_command}\n")
    e = subprocess.run(pip_install_command)
    return e.returncode
Return the version of Python we are running.
def get_python_version() -> str:
    """Return the version of Python we are running."""
    major = sys.version_info[0]
    minor = sys.version_info[1]
    return f"{major}.{minor}"
Setup local virtual environment.
def main(): """ Setup local virtual environment. """ airflow_home_dir = Path(os.environ.get("AIRFLOW_HOME", Path.home() / "airflow")) airflow_sources = Path(__file__).resolve().parents[2] if not check_if_in_virtualenv(): print( "Local virtual environment not activated.\nPlease create and activate it " "first. (for example using 'python3 -m venv venv && source venv/bin/activate')" ) sys.exit(1) print("Initializing environment...") print(f"This will remove the folder {airflow_home_dir} and reset all the databases!") response = input("Are you sure? (y/N/q)") if response != "y": sys.exit(2) print(f"\nWiping and recreating {airflow_home_dir}") if airflow_home_dir == airflow_sources: print("AIRFLOW_HOME and Source code are in the same path") print( f"When running this script it will delete all files in path {airflow_home_dir} " "to clear dynamic files like config/logs/db" ) print("Please move the airflow source code elsewhere to avoid deletion") sys.exit(3) clean_up_airflow_home(airflow_home_dir) return_code = pip_install_requirements() if return_code != 0: print( "To solve persisting issues with the installation, you might need the " "prerequisites installed on your system.\n " "Try running the command below and rerun virtualenv installation\n" ) os_type = sys.platform if os_type == "darwin": print("brew install sqlite mysql postgresql openssl") print('export LDFLAGS="-L/usr/local/opt/openssl/lib"') print('export CPPFLAGS="-I/usr/local/opt/openssl/include"') else: print( "sudo apt install build-essential python3-dev libsqlite3-dev openssl " "sqlite default-libmysqlclient-dev libmysqlclient-dev postgresql" ) sys.exit(4) print("\nResetting AIRFLOW sqlite database...") env = os.environ.copy() env["AIRFLOW__CORE__LOAD_EXAMPLES"] = "False" env["AIRFLOW__CORE__UNIT_TEST_MODE"] = "False" env["AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_ENABLED"] = "False" env["AIRFLOW__CORE__DAGS_FOLDER"] = f"{airflow_sources}/empty" env["AIRFLOW__CORE__PLUGINS_FOLDER"] = f"{airflow_sources}/empty" subprocess.run(["airflow", "db", "reset", "--yes"], env=env) print("\nResetting AIRFLOW sqlite unit test database...") env = os.environ.copy() env["AIRFLOW__CORE__LOAD_EXAMPLES"] = "True" env["AIRFLOW__CORE__UNIT_TEST_MODE"] = "False" env["AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_ENABLED"] = "False" env["AIRFLOW__CORE__DAGS_FOLDER"] = f"{airflow_sources}/empty" env["AIRFLOW__CORE__PLUGINS_FOLDER"] = f"{airflow_sources}/empty" subprocess.run(["airflow", "db", "reset", "--yes"], env=env) print("\nInitialization of environment complete! Go ahead and develop Airflow!")
Resets env variables.
def reset_environment():
    """Resets env variables."""
    init_env = os.environ.copy()
    yield
    changed_env = os.environ
    # Iterate over a snapshot of the keys - deleting from os.environ while iterating over it
    # directly would raise "dictionary changed size during iteration".
    for key in list(changed_env):
        if key not in init_env:
            del os.environ[key]
        else:
            os.environ[key] = init_env[key]
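A minimal usage sketch, assuming the generator above is registered as a pytest fixture (for example with ``@pytest.fixture`` in ``conftest.py``); the test name and variables are illustrative only.

import os

def test_modifies_environment(reset_environment):
    # Variables added or changed here are reverted once the fixture resumes after the test.
    os.environ["MY_TEMPORARY_FLAG"] = "1"  # hypothetical variable added by the test
    os.environ["LANG"] = "C"  # existing variable changed by the test
    assert os.environ["MY_TEMPORARY_FLAG"] == "1"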
Return secret key configured.
def secret_key() -> str: """Return secret key configured.""" from airflow.configuration import conf the_key = conf.get("webserver", "SECRET_KEY") if the_key is None: raise RuntimeError( "The secret key SHOULD be configured as `[webserver] secret_key` in the " "configuration/environment at this stage! " ) return the_key
Resets Airflow db.
def reset_db(): """Resets Airflow db.""" from airflow.utils import db db.resetdb()
Add options parser for custom plugins.
def pytest_addoption(parser):
    """Add options parser for custom plugins."""
    group = parser.getgroup("airflow")
    group.addoption(
        "--with-db-init",
        action="store_true",
        dest="db_init",
        help="Forces database initialization before tests",
    )
    group.addoption(
        "--integration",
        action="append",
        dest="integration",
        metavar="INTEGRATIONS",
        help="only run tests matching integration specified: "
        "[cassandra,kerberos,mongo,celery,statsd,trino]. ",
    )
    group.addoption(
        "--skip-db-tests",
        action="store_true",
        dest="skip_db_tests",
        help="skip tests that require database",
    )
    group.addoption(
        "--run-db-tests-only",
        action="store_true",
        dest="run_db_tests_only",
        help="only run tests requiring database",
    )
    group.addoption(
        "--backend",
        action="store",
        dest="backend",
        metavar="BACKEND",
        help="only run tests matching the backend: [sqlite,postgres,mysql].",
    )
    group.addoption(
        "--system",
        action="append",
        dest="system",
        metavar="SYSTEMS",
        help="only run tests matching the system specified [google.cloud, google.marketing_platform]",
    )
    group.addoption(
        "--include-long-running",
        action="store_true",
        dest="include_long_running",
        help="Includes long running tests (marked with long_running marker). They are skipped by default.",
    )
    group.addoption(
        "--include-quarantined",
        action="store_true",
        dest="include_quarantined",
        help="Includes quarantined tests (marked with quarantined marker). They are skipped by default.",
    )
    group.addoption(
        "--exclude-virtualenv-operator",
        action="store_true",
        dest="exclude_virtualenv_operator",
        help="Excludes virtualenv operators tests (marked with virtualenv_test marker).",
    )
    group.addoption(
        "--exclude-external-python-operator",
        action="store_true",
        dest="exclude_external_python_operator",
        help="Excludes external python operator tests (marked with external_python_test marker).",
    )
    allowed_trace_sql_columns_list = ",".join(ALLOWED_TRACE_SQL_COLUMNS)
    group.addoption(
        "--trace-sql",
        action="store",
        dest="trace_sql",
        help=(
            "Trace SQL statements. As an argument, you must specify the columns to be "
            f"displayed as a comma-separated list. Supported values: [{allowed_trace_sql_columns_list}]"
        ),
        metavar="COLUMNS",
    )
    group.addoption(
        "--no-db-cleanup",
        action="store_false",
        dest="db_cleanup",
        help="Disable DB clear before each test module.",
    )
    group.addoption(
        "--disable-capture-warnings",
        action="store_true",
        dest="disable_capture_warnings",
        help="Disable internal capture warnings.",
    )
    group.addoption(
        "--warning-output-path",
        action="store",
        dest="warning_output_path",
        metavar="PATH",
        help=(
            "Path for resulting captured warnings. Absolute or relative to the `tests` directory. "
            "If not provided or environment variable `CAPTURE_WARNINGS_OUTPUT` not set "
            "then 'warnings.txt' will be used."
        ),
    )
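For reference, a hedged sketch of invoking pytest with some of the options registered above; the test path is hypothetical and this conftest must be on the collected path.

import pytest

# Hypothetical invocation - the test path is made up; the flags are the ones registered above.
exit_code = pytest.main(
    [
        "tests/some/test_module.py",
        "--backend", "sqlite",  # only run tests compatible with the sqlite backend
        "--skip-db-tests",  # skip tests that require a database
        "--include-long-running",  # also run tests marked as long_running
    ]
)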
Helper that sets up the Airflow testing environment.
def initialize_airflow_tests(request):
    """Helper that sets up the Airflow testing environment."""
    print(" AIRFLOW ".center(60, "="))

    # Setup test environment for breeze
    home = os.path.expanduser("~")
    airflow_home = os.environ.get("AIRFLOW_HOME") or os.path.join(home, "airflow")

    print(f"Home of the user: {home}\nAirflow home {airflow_home}")

    # Initialize Airflow db if required
    lock_file = os.path.join(airflow_home, ".airflow_db_initialised")
    if not skip_db_tests:
        if request.config.option.db_init:
            print("Initializing the DB - forced with --with-db-init switch.")
            initial_db_init()
        elif not os.path.exists(lock_file):
            print(
                "Initializing the DB - first time after entering the container.\n"
                "You can force re-initialization of the database by adding the --with-db-init switch to run-tests."
            )
            initial_db_init()
            # Create pid file
            with open(lock_file, "w+"):
                pass
        else:
            print(
                "Skipping initializing of the DB as it was initialized already.\n"
                "You can re-initialize the database by adding the --with-db-init flag when running tests."
            )
    integration_kerberos = os.environ.get("INTEGRATION_KERBEROS")
    if integration_kerberos == "true":
        # Initialize kerberos
        kerberos = os.environ.get("KRB5_KTNAME")
        if kerberos:
            subprocess.check_call(["kinit", "-kt", kerberos, "[email protected]"])
        else:
            print("Kerberos enabled! Please setup KRB5_KTNAME environment variable")
            sys.exit(1)
Use time-machine to "stub" sleep. This means the ``sleep()`` takes no time, but ``datetime.now()`` appears to move forwards. If your module under test does ``import time`` and then ``time.sleep``: .. code-block:: python def test_something(frozen_sleep): my_mod.fn_under_test() If your module under test does ``from time import sleep`` then you will have to mock that sleep function directly: .. code-block:: python def test_something(frozen_sleep, monkeypatch): monkeypatch.setattr("my_mod.sleep", frozen_sleep) my_mod.fn_under_test()
def frozen_sleep(monkeypatch): """Use time-machine to "stub" sleep. This means the ``sleep()`` takes no time, but ``datetime.now()`` appears to move forwards. If your module under test does ``import time`` and then ``time.sleep``: .. code-block:: python def test_something(frozen_sleep): my_mod.fn_under_test() If your module under test does ``from time import sleep`` then you will have to mock that sleep function directly: .. code-block:: python def test_something(frozen_sleep, monkeypatch): monkeypatch.setattr("my_mod.sleep", frozen_sleep) my_mod.fn_under_test() """ traveller = None def fake_sleep(seconds): nonlocal traveller utcnow = datetime.now(tz=timezone.utc) if traveller is not None: traveller.stop() traveller = time_machine.travel(utcnow + timedelta(seconds=seconds)) traveller.start() monkeypatch.setattr("time.sleep", fake_sleep) yield fake_sleep if traveller is not None: traveller.stop()
Fixture to help create DAG, DagModel, and SerializedDAG automatically.

You have to use the dag_maker as a context manager, and it takes the same arguments as DAG::

    with dag_maker(dag_id="mydag") as dag:
        task1 = EmptyOperator(task_id="mytask")
        task2 = EmptyOperator(task_id="mytask2")

If the DagModel you want to use needs different parameters than the one automatically created
by the dag_maker, you have to update the DagModel as below::

    dag_maker.dag_model.is_active = False
    session.merge(dag_maker.dag_model)
    session.commit()

For any test you use the dag_maker, make sure to create a DagRun::

    dag_maker.create_dagrun()

The dag_maker.create_dagrun takes the same arguments as dag.create_dagrun.

If you want to operate on serialized DAGs, then either pass ``serialized=True`` to the ``dag_maker()``
call, or you can mark your test/class/file with ``@pytest.mark.need_serialized_dag(True)``. In both of
these cases the ``dag`` returned by the context manager will be a lazily-evaluated proxy object to the
SerializedDAG.
def dag_maker(request): """Fixture to help create DAG, DagModel, and SerializedDAG automatically. You have to use the dag_maker as a context manager and it takes the same argument as DAG:: with dag_maker(dag_id="mydag") as dag: task1 = EmptyOperator(task_id="mytask") task2 = EmptyOperator(task_id="mytask2") If the DagModel you want to use needs different parameters than the one automatically created by the dag_maker, you have to update the DagModel as below:: dag_maker.dag_model.is_active = False session.merge(dag_maker.dag_model) session.commit() For any test you use the dag_maker, make sure to create a DagRun:: dag_maker.create_dagrun() The dag_maker.create_dagrun takes the same arguments as dag.create_dagrun If you want to operate on serialized DAGs, then either pass ``serialized=True`` to the ``dag_maker()`` call, or you can mark your test/class/file with ``@pytest.mark.need_serialized_dag(True)``. In both of these cases the ``dag`` returned by the context manager will be a lazily-evaluated proxy object to the SerializedDAG. """ import lazy_object_proxy # IMPORTANT: Delay _all_ imports from `airflow.*` to _inside a method_. # This fixture is "called" early on in the pytest collection process, and # if we import airflow.* here the wrong (non-test) config will be loaded # and "baked" in to various constants want_serialized = False # Allow changing default serialized behaviour with `@pytest.mark.need_serialized_dag` or # `@pytest.mark.need_serialized_dag(False)` serialized_marker = request.node.get_closest_marker("need_serialized_dag") if serialized_marker: (want_serialized,) = serialized_marker.args or (True,) from airflow.utils.log.logging_mixin import LoggingMixin class DagFactory(LoggingMixin): _own_session = False def __init__(self): from airflow.models import DagBag # Keep all the serialized dags we've created in this test self.dagbag = DagBag(os.devnull, include_examples=False, read_dags_from_db=False) def __enter__(self): self.dag.__enter__() if self.want_serialized: return lazy_object_proxy.Proxy(self._serialized_dag) return self.dag def _serialized_dag(self): return self.serialized_model.dag def get_serialized_data(self): try: data = self.serialized_model.data except AttributeError: raise RuntimeError("DAG serialization not requested") if isinstance(data, str): return json.loads(data) return data def __exit__(self, type, value, traceback): from airflow.models import DagModel from airflow.models.serialized_dag import SerializedDagModel dag = self.dag dag.__exit__(type, value, traceback) if type is not None: return dag.clear(session=self.session) dag.sync_to_db(processor_subdir=self.processor_subdir, session=self.session) self.dag_model = self.session.get(DagModel, dag.dag_id) if self.want_serialized: self.serialized_model = SerializedDagModel( dag, processor_subdir=self.dag_model.processor_subdir ) self.session.merge(self.serialized_model) serialized_dag = self._serialized_dag() self.dagbag.bag_dag(serialized_dag, root_dag=serialized_dag) self.session.flush() else: self.dagbag.bag_dag(self.dag, self.dag) def create_dagrun(self, **kwargs): from airflow.utils import timezone from airflow.utils.state import State from airflow.utils.types import DagRunType dag = self.dag kwargs = { "state": State.RUNNING, "start_date": self.start_date, "session": self.session, **kwargs, } # Need to provide run_id if the user does not either provide one # explicitly, or pass run_type for inference in dag.create_dagrun(). 
if "run_id" not in kwargs and "run_type" not in kwargs: kwargs["run_id"] = "test" if "run_type" not in kwargs: kwargs["run_type"] = DagRunType.from_run_id(kwargs["run_id"]) if kwargs.get("execution_date") is None: if kwargs["run_type"] == DagRunType.MANUAL: kwargs["execution_date"] = self.start_date else: kwargs["execution_date"] = dag.next_dagrun_info(None).logical_date if "data_interval" not in kwargs: logical_date = timezone.coerce_datetime(kwargs["execution_date"]) if kwargs["run_type"] == DagRunType.MANUAL: data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date) else: data_interval = dag.infer_automated_data_interval(logical_date) kwargs["data_interval"] = data_interval self.dag_run = dag.create_dagrun(**kwargs) for ti in self.dag_run.task_instances: ti.refresh_from_task(dag.get_task(ti.task_id)) return self.dag_run def create_dagrun_after(self, dagrun, **kwargs): next_info = self.dag.next_dagrun_info(self.dag.get_run_data_interval(dagrun)) if next_info is None: raise ValueError(f"cannot create run after {dagrun}") return self.create_dagrun( execution_date=next_info.logical_date, data_interval=next_info.data_interval, **kwargs, ) def __call__( self, dag_id="test_dag", serialized=want_serialized, fileloc=None, processor_subdir=None, session=None, **kwargs, ): from airflow import settings from airflow.models.dag import DAG from airflow.utils import timezone if session is None: self._own_session = True session = settings.Session() self.kwargs = kwargs self.session = session self.start_date = self.kwargs.get("start_date", None) default_args = kwargs.get("default_args", None) if default_args and not self.start_date: if "start_date" in default_args: self.start_date = default_args.get("start_date") if not self.start_date: if hasattr(request.module, "DEFAULT_DATE"): self.start_date = getattr(request.module, "DEFAULT_DATE") else: DEFAULT_DATE = timezone.datetime(2016, 1, 1) self.start_date = DEFAULT_DATE self.kwargs["start_date"] = self.start_date self.dag = DAG(dag_id, **self.kwargs) self.dag.fileloc = fileloc or request.module.__file__ self.want_serialized = serialized self.processor_subdir = processor_subdir return self def cleanup(self): from airflow.models import DagModel, DagRun, TaskInstance, XCom from airflow.models.dataset import DatasetEvent from airflow.models.serialized_dag import SerializedDagModel from airflow.models.taskmap import TaskMap from airflow.utils.retries import run_with_db_retries for attempt in run_with_db_retries(logger=self.log): with attempt: dag_ids = list(self.dagbag.dag_ids) if not dag_ids: return # To isolate problems here with problems from elsewhere on the session object self.session.rollback() self.session.query(SerializedDagModel).filter( SerializedDagModel.dag_id.in_(dag_ids) ).delete(synchronize_session=False) self.session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.query(TaskInstance).filter(TaskInstance.dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.query(XCom).filter(XCom.dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.query(DagModel).filter(DagModel.dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.query(TaskMap).filter(TaskMap.dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.query(DatasetEvent).filter(DatasetEvent.source_dag_id.in_(dag_ids)).delete( synchronize_session=False, ) self.session.commit() if self._own_session: self.session.expunge_all() factory = DagFactory() try: yield 
factory finally: factory.cleanup() with suppress(AttributeError): del factory.session
Create a `DAG` with a single `EmptyOperator` task.

DagRun and DagModel are also created.

Apart from the already existing arguments, any other argument in kwargs is passed to the DAG
and not to the EmptyOperator task.

If you have an argument that you want to pass to the EmptyOperator that is not here, please use
`default_args` so that the DAG will pass it to the Task::

    dag, task = create_dummy_dag(default_args={"start_date": timezone.datetime(2016, 1, 1)})

You will not be able to alter the created DagRun or DagModel; use the `dag_maker` fixture instead.
def create_dummy_dag(dag_maker):
    """Create a `DAG` with a single `EmptyOperator` task.

    DagRun and DagModel are also created.

    Apart from the already existing arguments, any other argument in kwargs is passed to the DAG
    and not to the EmptyOperator task.

    If you have an argument that you want to pass to the EmptyOperator that is not here, please use
    `default_args` so that the DAG will pass it to the Task::

        dag, task = create_dummy_dag(default_args={"start_date": timezone.datetime(2016, 1, 1)})

    You will not be able to alter the created DagRun or DagModel; use the `dag_maker` fixture instead.
    """
    from airflow.operators.empty import EmptyOperator
    from airflow.utils.types import DagRunType

    def create_dag(
        dag_id="dag",
        task_id="op1",
        task_display_name=None,
        max_active_tis_per_dag=16,
        max_active_tis_per_dagrun=None,
        pool="default_pool",
        executor_config=None,
        trigger_rule="all_done",
        on_success_callback=None,
        on_execute_callback=None,
        on_failure_callback=None,
        on_retry_callback=None,
        email=None,
        with_dagrun_type=DagRunType.SCHEDULED,
        **kwargs,
    ):
        with dag_maker(dag_id, **kwargs) as dag:
            op = EmptyOperator(
                task_id=task_id,
                task_display_name=task_display_name,
                max_active_tis_per_dag=max_active_tis_per_dag,
                max_active_tis_per_dagrun=max_active_tis_per_dagrun,
                executor_config=executor_config or {},
                on_success_callback=on_success_callback,
                on_execute_callback=on_execute_callback,
                on_failure_callback=on_failure_callback,
                on_retry_callback=on_retry_callback,
                email=email,
                pool=pool,
                trigger_rule=trigger_rule,
            )
        if with_dagrun_type is not None:
            dag_maker.create_dagrun(run_type=with_dagrun_type)
        return dag, op

    return create_dag
Create a TaskInstance, and associated DB rows (DagRun, DagModel, etc). Uses ``create_dummy_dag`` to create the dag structure.
def create_task_instance(dag_maker, create_dummy_dag): """Create a TaskInstance, and associated DB rows (DagRun, DagModel, etc). Uses ``create_dummy_dag`` to create the dag structure. """ def maker( execution_date=None, dagrun_state=None, state=None, run_id=None, run_type=None, data_interval=None, external_executor_id=None, map_index=-1, **kwargs, ) -> TaskInstance: if execution_date is None: from airflow.utils import timezone execution_date = timezone.utcnow() _, task = create_dummy_dag(with_dagrun_type=None, **kwargs) dagrun_kwargs = {"execution_date": execution_date, "state": dagrun_state} if run_id is not None: dagrun_kwargs["run_id"] = run_id if run_type is not None: dagrun_kwargs["run_type"] = run_type if data_interval is not None: dagrun_kwargs["data_interval"] = data_interval dagrun = dag_maker.create_dagrun(**dagrun_kwargs) (ti,) = dagrun.task_instances ti.task = task ti.state = state ti.external_executor_id = external_executor_id ti.map_index = map_index dag_maker.session.flush() return ti return maker
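A minimal usage sketch, assuming ``create_task_instance`` is exposed as a pytest fixture; the dag/task ids and the asserted values are illustrative only.

def test_task_instance_defaults(create_task_instance):
    from airflow.utils.state import State

    # dag_id/task_id are forwarded to create_dummy_dag; state is applied by the maker itself.
    ti = create_task_instance(dag_id="example_dag", task_id="example_task", state=State.QUEUED)
    assert ti.state == State.QUEUED
    assert ti.map_index == -1  # default map_index set by the maker above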
Disable redaction of sensitive text in tests, unless the test is explicitly marked with ``enable_redact``.
def _disable_redact(request: pytest.FixtureRequest, mocker):
    """Disable redaction of sensitive text in tests, unless the test is marked with ``enable_redact``."""
    from airflow import settings

    if next(request.node.iter_markers("enable_redact"), None):
        with pytest.MonkeyPatch.context() as mp_ctx:
            mp_ctx.setattr(settings, "MASK_SECRETS_IN_LOGS", True)
            yield
        return

    mocked_redact = mocker.patch("airflow.utils.log.secrets_masker.SecretsMasker.redact")
    mocked_redact.side_effect = lambda item, name=None, max_depth=None: item
    with pytest.MonkeyPatch.context() as mp_ctx:
        mp_ctx.setattr(settings, "MASK_SECRETS_IN_LOGS", False)
        yield
    return
Returns a list of suspended providers folders that should be skipped when running tests (without any prefix - for example apache/beam, yandex, google etc.).
def get_suspended_providers_folders() -> list[str]: """ Returns a list of suspended providers folders that should be skipped when running tests (without any prefix - for example apache/beam, yandex, google etc.). """ suspended_providers = [] for provider_path in AIRFLOW_PROVIDERS_ROOT.rglob("provider.yaml"): provider_yaml = yaml.safe_load(provider_path.read_text()) if provider_yaml["state"] == "suspended": suspended_providers.append( provider_path.parent.relative_to(AIRFLOW_SOURCES_ROOT) .as_posix() .replace("airflow/providers/", "") ) return suspended_providers
Returns a list of providers folders that should be excluded for current Python version and skipped when running tests (without any prefix - for example apache/beam, yandex, google etc.).
def get_python_excluded_providers_folders() -> list[str]: """ Returns a list of providers folders that should be excluded for current Python version and skipped when running tests (without any prefix - for example apache/beam, yandex, google etc.). """ excluded_providers = [] current_python_version = f"{sys.version_info.major}.{sys.version_info.minor}" for provider_path in AIRFLOW_PROVIDERS_ROOT.rglob("provider.yaml"): provider_yaml = yaml.safe_load(provider_path.read_text()) excluded_python_versions = provider_yaml.get("excluded-python-versions", []) if current_python_version in excluded_python_versions: excluded_providers.append( provider_path.parent.relative_to(AIRFLOW_SOURCES_ROOT) .as_posix() .replace("airflow/providers/", "") ) return excluded_providers
Check the cleanup provider manager functionality.
def test_cleanup_providers_manager(cleanup_providers_manager): """Check the cleanup provider manager functionality.""" provider_manager = ProvidersManager() assert isinstance(provider_manager.hooks, LazyDictWithCache) hooks = provider_manager.hooks ProvidersManager()._cleanup() assert not len(hooks) assert ProvidersManager().hooks is hooks
Ensure there's no XCom littered by other modules.
def clean_xcom(): """Ensure there's no XCom littered by other modules.""" with create_session() as session: session.query(XCom).delete()
Function that renders a helm chart into dictionaries. For helm chart testing only
def render_chart( name="release-name", values=None, show_only=None, chart_dir=None, kubernetes_version=DEFAULT_KUBERNETES_VERSION, namespace=None, ): """ Function that renders a helm chart into dictionaries. For helm chart testing only """ values = values or {} chart_dir = chart_dir or str(CHART_DIR) namespace = namespace or "default" with NamedTemporaryFile() as tmp_file: content = yaml.dump(values) tmp_file.write(content.encode()) tmp_file.flush() command = [ "helm", "template", name, chart_dir, "--values", tmp_file.name, "--kube-version", kubernetes_version, "--namespace", namespace, ] if show_only: for i in show_only: command.extend(["--show-only", i]) templates = subprocess.check_output(command, stderr=subprocess.PIPE, cwd=chart_dir) k8s_objects = yaml.full_load_all(templates) k8s_objects = [k8s_object for k8s_object in k8s_objects if k8s_object] # type: ignore for k8s_object in k8s_objects: validate_k8s_object(k8s_object, kubernetes_version) return k8s_objects
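An illustrative call of the helper above; the values and the ``show_only`` template path are assumptions about the chart layout, not guarantees.

# Illustrative only - the template path and values are assumed; adjust to the actual chart.
docs = render_chart(
    values={"executor": "CeleryExecutor"},
    show_only=["templates/workers/worker-deployment.yaml"],
)
assert docs, "expected at least one rendered document"
assert docs[0]["kind"] == "Deployment"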
Helper to create a lookup dict from k8s_objects. The keys of the dict are the k8s object's kind and name
def prepare_k8s_lookup_dict(k8s_objects) -> dict[tuple[str, str], dict[str, Any]]: """ Helper to create a lookup dict from k8s_objects. The keys of the dict are the k8s object's kind and name """ k8s_obj_by_key = { (k8s_object["kind"], k8s_object["metadata"]["name"]): k8s_object for k8s_object in k8s_objects } return k8s_obj_by_key
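A sketch combining the two helpers above; the ``(kind, name)`` key used for the lookup is an assumption about what the chart renders.

# Sketch - the object name "release-name-scheduler" is assumed, not guaranteed by the chart.
k8s_objects = render_chart(values={"executor": "LocalExecutor"})
lookup = prepare_k8s_lookup_dict(k8s_objects)
scheduler_deployment = lookup.get(("Deployment", "release-name-scheduler"))
if scheduler_deployment is not None:
    assert scheduler_deployment["metadata"]["name"] == "release-name-scheduler"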
Function that renders dictionaries into k8s objects. For helm chart testing only.
def render_k8s_object(obj, type_to_render): """ Function that renders dictionaries into k8s objects. For helm chart testing only. """ return api_client._ApiClient__deserialize_model(obj, type_to_render)
Check task rules for given task.
def _check_task_rules(current_task: BaseOperator): """Check task rules for given task.""" notices = [] for rule in TASK_RULES: try: rule(current_task) except AirflowClusterPolicyViolation as ex: notices.append(str(ex)) if notices: notices_list = " * " + "\n * ".join(notices) raise AirflowClusterPolicyViolation( f"DAG policy violation (DAG ID: {current_task.dag_id}, Path: {current_task.dag.fileloc}):\n" f"Notices:\n" f"{notices_list}" )
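``TASK_RULES`` is defined elsewhere; it is assumed to be a list of callables that each raise ``AirflowClusterPolicyViolation`` for a task that breaks the policy. A hypothetical rule consistent with the owner check mentioned in the next policy might look like this.

# Hypothetical rule - the real TASK_RULES entries live elsewhere in the settings module.
def _task_must_have_non_default_owner(task: BaseOperator) -> None:
    if not task.owner or task.owner.lower() == "airflow":
        raise AirflowClusterPolicyViolation(
            f"Task must have a non-default owner. Current value: task={task.task_id}, owner={task.owner}"
        )

# TASK_RULES would then be, for example: [_task_must_have_non_default_owner]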
Ensure Tasks have non-default owners.
def example_task_policy(task: BaseOperator): """Ensure Tasks have non-default owners.""" _check_task_rules(task)
Ensure that DAG has at least one tag and skip the DAG with `only_for_beta` tag.
def dag_policy(dag: DAG): """Ensure that DAG has at least one tag and skip the DAG with `only_for_beta` tag.""" if not dag.tags: raise AirflowClusterPolicyViolation( f"DAG {dag.dag_id} has no tags. At least one tag required. File path: {dag.fileloc}" ) if "only_for_beta" in dag.tags: raise AirflowClusterPolicySkipDag( f"DAG {dag.dag_id} is not loaded on the production cluster, due to `only_for_beta` tag." )
Generates configuration from provided template & variables defined in current scope. :param template: a config content templated with {{variables}}
def parameterized_config(template) -> str: """ Generates configuration from provided template & variables defined in current scope. :param template: a config content templated with {{variables}} """ all_vars = get_all_expansion_variables() return template.format(**all_vars)
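A hedged example of the templating above; it assumes ``AIRFLOW_HOME`` is among the variables returned by ``get_all_expansion_variables()``, and the template text is made up.

# Illustrative template - the available expansion variables depend on the calling scope.
template = "[core]\ndags_folder = {AIRFLOW_HOME}/dags\n"
rendered = parameterized_config(template)
# rendered would be e.g. "[core]\ndags_folder = /home/user/airflow/dags\n" on a typical setup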
Helper to recursively set permissions for specific paths only, and revert them afterwards.
def set_permissions(settings: dict[Path | str, int]):
    """Helper to recursively set permissions for specific paths only, and revert them afterwards."""
    orig_permissions = []
    try:
        print(" Change file/directory permissions ".center(72, "+"))
        for path, mode in settings.items():
            if isinstance(path, str):
                path = Path(path)
            if len(path.parts) <= 1:
                raise SystemError(f"Unable to change permission for the root directory: {path}.")

            st_mode = os.stat(path).st_mode
            new_st_mode = st_mode | mode
            if new_st_mode > st_mode:
                print(f"Path={path}, mode={oct(st_mode)}, new_mode={oct(new_st_mode)}")
                orig_permissions.append((path, st_mode))
                os.chmod(path, new_st_mode)

            parent_path = path.parent
            while len(parent_path.parts) > 1:
                st_mode = os.stat(parent_path).st_mode
                new_st_mode = st_mode | 0o755  # grant r/o access to the parent directories
                if new_st_mode > st_mode:
                    print(f"Path={parent_path}, mode={oct(st_mode)}, new_mode={oct(new_st_mode)}")
                    orig_permissions.append((parent_path, st_mode))
                    os.chmod(parent_path, new_st_mode)

                parent_path = parent_path.parent
        print("".center(72, "+"))
        yield
    finally:
        for path, mode in orig_permissions:
            os.chmod(path, mode)
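A minimal usage sketch, assuming the generator above is wrapped as a context manager (for example with ``contextlib.contextmanager``); the path and mode are illustrative.

# Hypothetical path - adjust to a file that exists in your test environment.
with set_permissions({"/files/test-permissions/restricted-file": 0o004}):
    ...  # code that needs the file (and its parent directories) to be world-readable
# On exit, the original modes of the file and its parents are restored.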
Reset Logging
def reset_logging(): """Reset Logging""" manager = logging.root.manager manager.disabled = logging.NOTSET airflow_loggers = [ logger for logger_name, logger in manager.loggerDict.items() if logger_name.startswith("airflow") ] for logger in airflow_loggers: if isinstance(logger, logging.Logger): logger.setLevel(logging.NOTSET) logger.propagate = True logger.disabled = False logger.filters.clear() handlers = logger.handlers.copy() for handler in handlers: # Copied from `logging.shutdown`. try: handler.acquire() handler.flush() handler.close() except (OSError, ValueError): pass finally: handler.release() logger.removeHandler(handler)
Set a settings file and put it on the Python path.

:param content: The content of the settings file
:param directory: the directory to create the settings module in (relative, optional)
:param name: name of the settings variable to expose (e.g. LOGGING_CONFIG)
def settings_context(content, directory=None, name="LOGGING_CONFIG"):
    """
    Set a settings file and put it on the Python path.

    :param content: The content of the settings file
    :param directory: the directory to create the settings module in (relative, optional)
    :param name: name of the settings variable to expose (e.g. LOGGING_CONFIG)
    """
    initial_logging_config = os.environ.get("AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS", "")
    try:
        settings_root = tempfile.mkdtemp()
        filename = f"{SETTINGS_DEFAULT_NAME}.py"
        if directory:
            # Create the directory structure with __init__.py
            dir_path = os.path.join(settings_root, directory)
            pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)

            basedir = settings_root
            for part in directory.split("/"):
                open(os.path.join(basedir, "__init__.py"), "w").close()
                basedir = os.path.join(basedir, part)
            open(os.path.join(basedir, "__init__.py"), "w").close()

            # Replace slashes by dots
            module = directory.replace("/", ".") + "." + SETTINGS_DEFAULT_NAME + "." + name
            settings_file = os.path.join(dir_path, filename)
        else:
            module = SETTINGS_DEFAULT_NAME + "." + name
            settings_file = os.path.join(settings_root, filename)

        with open(settings_file, "w") as handle:
            handle.writelines(content)
        sys.path.append(settings_root)

        # Using environment vars instead of conf_vars so value is accessible
        # to parent and child processes when using 'spawn' for multiprocessing.
        os.environ["AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"] = module
        yield settings_file
    finally:
        os.environ["AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"] = initial_logging_config
        sys.path.remove(settings_root)
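A sketch of using the helper above in a test, assuming the generator is wrapped as a context manager; the settings content is made up and the resulting module path depends on ``SETTINGS_DEFAULT_NAME``.

import os

# Minimal made-up logging settings content.
CUSTOM_LOGGING_SETTINGS = "LOGGING_CONFIG = {'version': 1, 'disable_existing_loggers': False}\n"

with settings_context(CUSTOM_LOGGING_SETTINGS, directory="custom/pkg") as settings_file:
    # Anything reading AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS now resolves to the temporary module,
    # e.g. "custom.pkg.<SETTINGS_DEFAULT_NAME>.LOGGING_CONFIG".
    print(settings_file, os.environ["AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"])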
Test that a "plain" function from airflow_local_settings is registered via a plugin
def test_local_settings_plain_function(plugin_manager: pluggy.PluginManager): """Test that a "plain" function from airflow_local_settings is registered via a plugin""" called = False def dag_policy(dag): nonlocal called called = True mod = Namespace(dag_policy=dag_policy) policies.make_plugin_from_local_settings(plugin_manager, mod, {"dag_policy"}) plugin_manager.hook.dag_policy(dag="a") assert called
If a function in local_settings doesn't have the "correct" argument name, we can't naively turn it
into a plugin. This tests the signature-mismatch detection and shimming code path.
def test_local_settings_misnamed_argument(plugin_manager: pluggy.PluginManager):
    """
    If a function in local_settings doesn't have the "correct" argument name, we can't naively turn
    it into a plugin. This tests the signature-mismatch detection and shimming code path.
    """
    called_with = None

    def dag_policy(wrong_arg_name):
        nonlocal called_with
        called_with = wrong_arg_name

    mod = Namespace(dag_policy=dag_policy)

    policies.make_plugin_from_local_settings(plugin_manager, mod, {"dag_policy"})

    plugin_manager.hook.dag_policy(dag="passed_dag_value")

    assert called_with == "passed_dag_value"
Tests DAG logging.

:param kwargs: context kwargs passed by Airflow; the ``ti`` (task instance) key is used for logging.
def test_logging_fn(**kwargs):
    """Tests DAG logging.

    :param kwargs: context kwargs passed by Airflow; the ``ti`` (task instance) key is used for logging.
    """
    logger.info("Log from DAG Logger")
    kwargs["ti"].log.info("Log from TI Logger")
    print("Log from Print statement")
Create a subdag.
def subdag(parent_dag_name, child_dag_name, args): """ Create a subdag. """ dag_subdag = DAG( dag_id=f"{parent_dag_name}.{child_dag_name}", default_args=args, schedule="@daily", ) for i in range(2): EmptyOperator( task_id=f"{child_dag_name}-task-{i + 1}", default_args=args, dag=dag_subdag, ) return dag_subdag
A function with two args
def a_function(_, __): """A function with two args"""
Tests _DatasetBooleanCondition's evaluate and iter_datasets methods through DatasetAny and DatasetAll. Ensures DatasetAny evaluate returns True with any true condition, DatasetAll evaluate returns False if any condition is false, and both classes correctly iterate over datasets without duplication.
def test_datasetbooleancondition_evaluate_iter(): """ Tests _DatasetBooleanCondition's evaluate and iter_datasets methods through DatasetAny and DatasetAll. Ensures DatasetAny evaluate returns True with any true condition, DatasetAll evaluate returns False if any condition is false, and both classes correctly iterate over datasets without duplication. """ any_condition = DatasetAny(dataset1, dataset2) all_condition = DatasetAll(dataset1, dataset2) assert any_condition.evaluate({"s3://bucket1/data1": False, "s3://bucket2/data2": True}) is True assert all_condition.evaluate({"s3://bucket1/data1": True, "s3://bucket2/data2": False}) is False # Testing iter_datasets indirectly through the subclasses datasets_any = set(any_condition.iter_datasets()) datasets_all = set(all_condition.iter_datasets()) assert datasets_any == {("s3://bucket1/data1", dataset1), ("s3://bucket2/data2", dataset2)} assert datasets_all == {("s3://bucket1/data1", dataset1), ("s3://bucket2/data2", dataset2)}
Fixture to create test datasets and corresponding models.
def create_test_datasets(session): """Fixture to create test datasets and corresponding models.""" datasets = [Dataset(uri=f"hello{i}") for i in range(1, 3)] for dataset in datasets: session.add(DatasetModel(uri=dataset.uri)) session.commit() return datasets
Test @task original underlying function is accessible through the __wrapped__ attribute.
def test_task_decorator_has_wrapped_attr(): """ Test @task original underlying function is accessible through the __wrapped__ attribute. """ def org_test_func(): pass decorated_test_func = task_decorator(org_test_func) assert hasattr( decorated_test_func, "__wrapped__" ), "decorated function does not have __wrapped__ attribute" assert decorated_test_func.__wrapped__ is org_test_func, "__wrapped__ attr is not the original function"
Test that the tooltip for TaskGroup is the decorated-function's docstring.
def test_tooltip_derived_from_function_docstring(): """Test that the tooltip for TaskGroup is the decorated-function's docstring.""" @dag(start_date=pendulum.datetime(2022, 1, 1)) def pipeline(): @task_group() def tg(): """Function docstring.""" tg() _ = pipeline() assert _.task_group_dict["tg"].tooltip == "Function docstring."
Test that the tooltip for TaskGroup is the explicitly set value even if the decorated function has a docstring.
def test_tooltip_not_overridden_by_function_docstring(): """ Test that the tooltip for TaskGroup is the explicitly set value even if the decorated function has a docstring. """ @dag(start_date=pendulum.datetime(2022, 1, 1)) def pipeline(): @task_group(tooltip="tooltip for the TaskGroup") def tg(): """Function docstring.""" tg() _ = pipeline() assert _.task_group_dict["tg"].tooltip == "tooltip for the TaskGroup"
Verify can_try_again returns True until at least 5 seconds have passed.

For faster loops, the total number of tries will be higher. If loops take longer than 5 seconds,
we should still end up trying 2 times.
def test_running_retry_attempt_type(loop_duration, total_tries):
    """
    Verify can_try_again returns True until at least 5 seconds have passed.

    For faster loops, the total number of tries will be higher. If loops take longer than 5 seconds,
    we should still end up trying 2 times.
    """
    min_seconds_for_test = 5

    with time_machine.travel(pendulum.now("UTC"), tick=False) as t:
        # set MIN_SECONDS so tests don't break if the value is changed
        RunningRetryAttemptType.MIN_SECONDS = min_seconds_for_test
        a = RunningRetryAttemptType()
        while True:
            if not a.can_try_again():
                break
            t.shift(loop_duration)
        assert a.elapsed > min_seconds_for_test
        assert a.total_tries == total_tries
        assert a.tries_after_min == 1
Pytest fixture that mocks ``PackageIndexHook.get_connection`` using parametrized test data and
returns the expected connection URL (or None).
def mock_get_connection(monkeypatch: pytest.MonkeyPatch, request: pytest.FixtureRequest) -> str | None:
    """Pytest fixture that mocks ``PackageIndexHook.get_connection`` using parametrized test data
    and returns the expected connection URL (or None)."""
    testdata: dict[str, str | None] = request.param
    host: str | None = testdata.get("host", None)
    login: str | None = testdata.get("login", None)
    password: str | None = testdata.get("password", None)
    expected_result: str | None = testdata.get("expected_result", None)
    monkeypatch.setattr(
        "airflow.hooks.package_index.PackageIndexHook.get_connection",
        lambda *_: MockConnection(host, login, password),
    )
    return expected_result
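Because the fixture reads ``request.param``, it is driven via indirect parametrization; a sketch follows, with made-up test data and URLs.

import pytest

# Illustrative test data - the URLs are made up.
@pytest.mark.parametrize(
    "mock_get_connection",
    [
        {"host": "https://example.org/pypi/simple", "expected_result": "https://example.org/pypi/simple"},
        {"host": None, "expected_result": None},
    ],
    indirect=True,
)
def test_with_mocked_connection(mock_get_connection: str | None):
    expected = mock_get_connection  # the fixture returns the expected connection URL (or None)
    ...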
Test if connection url is assembled correctly from credentials and index_url.
def test_get_connection_url(mock_get_connection: str | None): """Test if connection url is assembled correctly from credentials and index_url.""" expected_result = mock_get_connection hook_instance = PackageIndexHook() if expected_result: connection_url = hook_instance.get_connection_url() assert connection_url == expected_result else: with pytest.raises(ValueError, match="Please provide an index URL."): hook_instance.get_connection_url()
Test if connection test responds correctly to return code.
def test_test_connection(monkeypatch: pytest.MonkeyPatch, mock_get_connection: str | None, success: int): """Test if connection test responds correctly to return code.""" def mock_run(*_, **__): class MockProc: """Mock class.""" returncode = success stderr = "some error text" return MockProc() monkeypatch.setattr("airflow.hooks.package_index.subprocess.run", mock_run) hook_instance = PackageIndexHook() if mock_get_connection: result = hook_instance.test_connection() assert result[0] == (success == 0) else: with pytest.raises(ValueError, match="Please provide an index URL"): hook_instance.test_connection()
Tests UI field result structure
def test_get_ui_field_behaviour(): """Tests UI field result structure""" ui_field_behavior = PackageIndexHook.get_ui_field_behaviour() assert "hidden_fields" in ui_field_behavior assert "relabeling" in ui_field_behavior assert "placeholders" in ui_field_behavior
A function that tests the received message.
def _basic_message_tester(message, test=None) -> Any:
    """A function that tests the received message."""
    assert test
    # Confluent Kafka converts messages to bytes
    assert message.value().decode(encoding="utf-8") == test
Create MongoDB connections used for testing purposes.
def mongo_connections():
    """Create MongoDB connections used for testing purposes."""
    connections = [
        Connection(conn_id="mongo_default", conn_type="mongo", host="mongo", port=27017),
        Connection(conn_id="mongo_test", conn_type="mongo", host="mongo", port=27017, schema="test"),
    ]

    with pytest.MonkeyPatch.context() as mp:
        for conn in connections:
            mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.as_json())
        yield
Test if _schedule_dag_run puts a task instance into SKIPPED state if any of its upstream tasks are skipped according to TriggerRuleDep.
def test_schedule_dag_run_with_upstream_skip(dag_maker, session): """ Test if _schedule_dag_run puts a task instance into SKIPPED state if any of its upstream tasks are skipped according to TriggerRuleDep. """ with dag_maker( dag_id="test_task_with_upstream_skip_process_task_instances", start_date=DEFAULT_DATE, session=session, ): dummy1 = EmptyOperator(task_id="dummy1") dummy2 = EmptyOperator(task_id="dummy2") dummy3 = EmptyOperator(task_id="dummy3") [dummy1, dummy2] >> dummy3 # dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock()) dr = dag_maker.create_dagrun(state=State.RUNNING) assert dr is not None tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)} # Set dummy1 to skipped and dummy2 to success. dummy3 remains as none. tis[dummy1.task_id].state = State.SKIPPED tis[dummy2.task_id].state = State.SUCCESS assert tis[dummy3.task_id].state == State.NONE session.flush() # dag_runs = DagRun.find(dag_id='test_task_with_upstream_skip_dag') # dag_file_processor._process_task_instances(dag, dag_runs=dag_runs) scheduler_job = Job() job_runner = SchedulerJobRunner(job=scheduler_job, subdir=os.devnull) job_runner._schedule_dag_run(dr, session) session.flush() tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)} assert tis[dummy1.task_id].state == State.SKIPPED assert tis[dummy2.task_id].state == State.SUCCESS # dummy3 should be skipped because dummy1 is skipped. assert tis[dummy3.task_id].state == State.SKIPPED
Fixture that cleans the database before and after every test.
def clean_database(): """Fixture that cleans the database before and after every test.""" clear_db_runs() clear_db_dags() yield # Test runs here clear_db_dags() clear_db_runs()
Fixture that provides a SQLAlchemy session
def session(): """Fixture that provides a SQLAlchemy session""" with create_session() as session: yield session
Checks that when a trigger fires, it doesn't log any sensitive information from arguments
def test_trigger_logging_sensitive_info(session, caplog): """ Checks that when a trigger fires, it doesn't log any sensitive information from arguments """ class SensitiveArgOperator(BaseOperator): def __init__(self, password, **kwargs): self.password = password super().__init__(**kwargs) # Use a trigger that will immediately succeed trigger = SuccessTrigger() op = SensitiveArgOperator(task_id="sensitive_arg_task", password="some_password") create_trigger_in_db(session, trigger, operator=op) triggerer_job = Job() triggerer_job_runner = TriggererJobRunner(triggerer_job) triggerer_job_runner.load_triggers() # Now, start TriggerRunner up (and set it as a daemon thread during tests) triggerer_job_runner.daemon = True triggerer_job_runner.trigger_runner.start() try: # Wait for up to 3 seconds for it to fire and appear in the event queue for _ in range(30): if triggerer_job_runner.trigger_runner.events: assert list(triggerer_job_runner.trigger_runner.events) == [(1, TriggerEvent(True))] break time.sleep(0.1) else: pytest.fail("TriggerRunner never sent the trigger event out") finally: # We always have to stop the runner triggerer_job_runner.trigger_runner.stop = True triggerer_job_runner.trigger_runner.join(30) # Since we have now an in-memory process of forwarding the logs to stdout, # give it more time for the trigger event to write the log. time.sleep(0.5) assert "test_dag/test_run/sensitive_arg_task/-1/1 (ID 1) starting" in caplog.text assert "some_password" not in caplog.text