def send_email_smtp(
    to: str | Iterable[str],
    subject: str,
    html_content: str,
    files: list[str] | None = None,
    dryrun: bool = False,
    cc: str | Iterable[str] | None = None,
    bcc: str | Iterable[str] | None = None,
    mime_subtype: str = "mixed",
    mime_charset: str = "utf-8",
    conn_id: str = "smtp_default",
    from_email: str | None = None,
    custom_headers: dict[str, Any] | None = None,
    **kwargs,
) -> None:
    """Send an email with HTML content.

    :param to: Recipient email address or list of addresses.
    :param subject: Email subject.
    :param html_content: Email body in HTML format.
    :param files: List of file paths to attach to the email.
    :param dryrun: If True, the email will not be sent, but all other actions will be performed.
    :param cc: Carbon copy recipient email address or list of addresses.
    :param bcc: Blind carbon copy recipient email address or list of addresses.
    :param mime_subtype: MIME subtype of the email.
    :param mime_charset: MIME charset of the email.
    :param conn_id: Connection ID of the SMTP server.
    :param from_email: Sender email address.
    :param custom_headers: Dictionary of custom headers to include in the email.
    :param kwargs: Additional keyword arguments.

    >>> send_email_smtp("[email protected]", "foo", "<b>Foo</b> bar", ["/dev/null"], dryrun=True)
    """
    smtp_mail_from = conf.get("smtp", "SMTP_MAIL_FROM")

    if smtp_mail_from is not None:
        mail_from = smtp_mail_from
    else:
        if from_email is None:
            raise ValueError(
                "You should set from email - either by smtp/smtp_mail_from config or `from_email` parameter"
            )
        mail_from = from_email

    msg, recipients = build_mime_message(
        mail_from=mail_from,
        to=to,
        subject=subject,
        html_content=html_content,
        files=files,
        cc=cc,
        bcc=bcc,
        mime_subtype=mime_subtype,
        mime_charset=mime_charset,
        custom_headers=custom_headers,
    )

    send_mime_email(e_from=mail_from, e_to=recipients, mime_msg=msg, conn_id=conn_id, dryrun=dryrun)

def build_mime_message(
    mail_from: str | None,
    to: str | Iterable[str],
    subject: str,
    html_content: str,
    files: list[str] | None = None,
    cc: str | Iterable[str] | None = None,
    bcc: str | Iterable[str] | None = None,
    mime_subtype: str = "mixed",
    mime_charset: str = "utf-8",
    custom_headers: dict[str, Any] | None = None,
) -> tuple[MIMEMultipart, list[str]]:
    """
    Build a MIME message that can be used to send an email, and return a full list of recipients.

    :param mail_from: Email address to set as the email's "From" field.
    :param to: A string or iterable of strings containing email addresses to set as the email's "To" field.
    :param subject: The subject of the email.
    :param html_content: The content of the email in HTML format.
    :param files: A list of paths to files to be attached to the email.
    :param cc: A string or iterable of strings containing email addresses to set as the email's "CC" field.
    :param bcc: A string or iterable of strings containing email addresses to set as the email's "BCC" field.
    :param mime_subtype: The subtype of the MIME message. Default: "mixed".
    :param mime_charset: The charset of the email. Default: "utf-8".
    :param custom_headers: Additional headers to add to the MIME message. No validations are run on these
        values, and they should be able to be encoded.
    :return: A tuple containing the email as a MIMEMultipart object and a list of recipient email addresses.
    """
    to = get_email_address_list(to)

    msg = MIMEMultipart(mime_subtype)
    msg["Subject"] = subject
    if mail_from:
        msg["From"] = mail_from
    msg["To"] = ", ".join(to)
    recipients = to
    if cc:
        cc = get_email_address_list(cc)
        msg["CC"] = ", ".join(cc)
        recipients += cc

    if bcc:
        # don't add bcc in header
        bcc = get_email_address_list(bcc)
        recipients += bcc

    msg["Date"] = formatdate(localtime=True)
    mime_text = MIMEText(html_content, "html", mime_charset)
    msg.attach(mime_text)

    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as file:
            part = MIMEApplication(file.read(), Name=basename)
            part["Content-Disposition"] = f'attachment; filename="{basename}"'
            part["Content-ID"] = f"<{basename}>"
            msg.attach(part)

    if custom_headers:
        for header_key, header_value in custom_headers.items():
            msg[header_key] = header_value

    return msg, recipients

def send_mime_email(
    e_from: str,
    e_to: str | list[str],
    mime_msg: MIMEMultipart,
    conn_id: str = "smtp_default",
    dryrun: bool = False,
) -> None:
    """
    Send a MIME email.

    :param e_from: The email address of the sender.
    :param e_to: The email address or a list of email addresses of the recipient(s).
    :param mime_msg: The MIME message to send.
    :param conn_id: The ID of the SMTP connection to use.
    :param dryrun: If True, the email will not be sent, but a log message will be generated.
    """
    smtp_host = conf.get_mandatory_value("smtp", "SMTP_HOST")
    smtp_port = conf.getint("smtp", "SMTP_PORT")
    smtp_starttls = conf.getboolean("smtp", "SMTP_STARTTLS")
    smtp_ssl = conf.getboolean("smtp", "SMTP_SSL")
    smtp_retry_limit = conf.getint("smtp", "SMTP_RETRY_LIMIT")
    smtp_timeout = conf.getint("smtp", "SMTP_TIMEOUT")
    smtp_user = None
    smtp_password = None

    if conn_id is not None:
        try:
            from airflow.hooks.base import BaseHook

            airflow_conn = BaseHook.get_connection(conn_id)
            smtp_user = airflow_conn.login
            smtp_password = airflow_conn.password
        except AirflowException:
            pass
    if smtp_user is None or smtp_password is None:
        warnings.warn(
            "Fetching SMTP credentials from configuration variables will be deprecated in a future "
            "release. Please set credentials using a connection instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        try:
            smtp_user = conf.get("smtp", "SMTP_USER")
            smtp_password = conf.get("smtp", "SMTP_PASSWORD")
        except AirflowConfigException:
            log.debug("No user/password found for SMTP, so logging in with no authentication.")

    if not dryrun:
        for attempt in range(1, smtp_retry_limit + 1):
            log.info("Email alerting: attempt %s", str(attempt))
            try:
                smtp_conn = _get_smtp_connection(smtp_host, smtp_port, smtp_timeout, smtp_ssl)
            except smtplib.SMTPServerDisconnected:
                if attempt == smtp_retry_limit:
                    raise
            else:
                if smtp_starttls:
                    smtp_conn.starttls()
                if smtp_user and smtp_password:
                    smtp_conn.login(smtp_user, smtp_password)
                log.info("Sent an alert email to %s", e_to)
                smtp_conn.sendmail(e_from, e_to, mime_msg.as_string())
                smtp_conn.quit()
                break

def get_email_address_list(addresses: str | Iterable[str]) -> list[str]:
    """
    Return a list of email addresses from the provided input.

    :param addresses: A string or iterable of strings containing email addresses.
    :return: A list of email addresses.
    :raises TypeError: If the input is not a string or iterable of strings.
    """
    if isinstance(addresses, str):
        return _get_email_list_from_str(addresses)
    elif isinstance(addresses, collections.abc.Iterable):
        if not all(isinstance(item, str) for item in addresses):
            raise TypeError("The items in your iterable must be strings.")
        return list(addresses)
    else:
        raise TypeError(f"Unexpected argument type: Received '{type(addresses).__name__}'.")

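For illustration, a few hypothetical calls showing how the helper normalizes its input (the addresses below are made up):

    >>> get_email_address_list("a@example.com, b@example.com; c@example.com")
    ['a@example.com', 'b@example.com', 'c@example.com']
    >>> get_email_address_list(("a@example.com", "b@example.com"))
    ['a@example.com', 'b@example.com']
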
def _get_smtp_connection(host: str, port: int, timeout: int, with_ssl: bool) -> smtplib.SMTP:
    """
    Return an SMTP connection to the specified host and port, with optional SSL encryption.

    :param host: The hostname or IP address of the SMTP server.
    :param port: The port number to connect to on the SMTP server.
    :param timeout: The timeout in seconds for the connection.
    :param with_ssl: Whether to use SSL encryption for the connection.
    :return: An SMTP connection to the specified host and port.
    """
    if not with_ssl:
        return smtplib.SMTP(host=host, port=port, timeout=timeout)
    else:
        ssl_context_string = conf.get("email", "SSL_CONTEXT")
        if ssl_context_string == "default":
            ssl_context = ssl.create_default_context()
        elif ssl_context_string == "none":
            ssl_context = None
        else:
            raise RuntimeError(
                f"The email.ssl_context configuration variable must "
                f"be set to 'default' or 'none' and is '{ssl_context_string}'."
            )
        return smtplib.SMTP_SSL(host=host, port=port, timeout=timeout, context=ssl_context)

def _get_email_list_from_str(addresses: str) -> list[str]:
    """
    Extract a list of email addresses from a string.

    The string can contain multiple email addresses separated by
    any of the following delimiters: ',' or ';'.

    :param addresses: A string containing one or more email addresses.
    :return: A list of email addresses.
    """
    pattern = r"\s*[,;]\s*"
    return re2.split(pattern, addresses)

def _get_empty_set_for_configuration() -> set[tuple[str, str]]:
    """
    Retrieve an empty set for configuration.

    This method is only needed because the configuration module has a deprecated method called
    ``set``, and it confuses mypy. This method will be removed when we remove the deprecated method.

    :meta private:
    :return: empty set
    """
    return set()

def entry_points_with_dist(group: str) -> Iterator[EPnD]:
    """Retrieve entry points of the given group.

    This is like the ``entry_points()`` function from ``importlib.metadata``,
    except it also returns the distribution the entry point was loaded from.

    Note that this may return multiple distributions to the same package if they
    are loaded from different ``sys.path`` entries. The caller site should
    implement appropriate deduplication logic if needed.

    :param group: Filter results to only this entrypoint group
    :return: Generator of (EntryPoint, Distribution) objects for the specified groups
    """
    return iter(_get_grouped_entry_points()[group])

def TemporaryDirectory(*args, **kwargs):
    """Use `tempfile.TemporaryDirectory`, this function is deprecated."""
    import warnings

    from tempfile import TemporaryDirectory as TmpDir

    warnings.warn(
        "This function is deprecated. Please use `tempfile.TemporaryDirectory`",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )

    return TmpDir(*args, **kwargs)

def mkdirs(path, mode):
    """
    Create the directory specified by path, creating intermediate directories as necessary.

    If directory already exists, this is a no-op.

    :param path: The directory to create
    :param mode: The mode to give to the directory e.g. 0o755, ignores umask
    """
    import warnings

    warnings.warn(
        f"This function is deprecated. Please use `pathlib.Path({path}).mkdir`",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )
    Path(path).mkdir(mode=mode, parents=True, exist_ok=True)

def correct_maybe_zipped(fileloc: None | str | Path) -> None | str | Path:
    """If the path contains a folder with a .zip suffix, treat it as a zip archive and return path."""
    if not fileloc:
        return fileloc
    search_ = ZIP_REGEX.search(str(fileloc))
    if not search_:
        return fileloc
    _, archive, _ = search_.groups()
    if archive and zipfile.is_zipfile(archive):
        return archive
    else:
        return fileloc

def open_maybe_zipped(fileloc, mode="r"):
    """
    Open the given file.

    If the path contains a folder with a .zip suffix, then the folder
    is treated as a zip archive, opening the file inside the archive.

    :return: a file object, as in `open`, or as in `ZipFile.open`.
    """
    _, archive, filename = ZIP_REGEX.search(fileloc).groups()
    if archive and zipfile.is_zipfile(archive):
        return TextIOWrapper(zipfile.ZipFile(archive, mode=mode).open(filename))
    else:
        return open(fileloc, mode=mode)

def _find_path_from_directory(
    base_dir_path: str | os.PathLike[str],
    ignore_file_name: str,
    ignore_rule_type: type[_IgnoreRule],
) -> Generator[str, None, None]:
    """Recursively search the base path and return the list of file paths that should not be ignored.

    :param base_dir_path: the base path to be searched
    :param ignore_file_name: the file name containing regular expressions for files that should be ignored.
    :param ignore_rule_type: the concrete class for ignore rules, which implements the _IgnoreRule interface.

    :return: a generator of file paths which should not be ignored.
    """
    # A Dict of patterns, keyed using resolved, absolute paths
    patterns_by_dir: dict[Path, list[_IgnoreRule]] = {}

    for root, dirs, files in os.walk(base_dir_path, followlinks=True):
        patterns: list[_IgnoreRule] = patterns_by_dir.get(Path(root).resolve(), [])

        ignore_file_path = Path(root) / ignore_file_name
        if ignore_file_path.is_file():
            with open(ignore_file_path) as ifile:
                lines_no_comments = [re2.sub(r"\s*#.*", "", line) for line in ifile.read().split("\n")]
                # append new patterns and filter out "None" objects, which are invalid patterns
                patterns += [
                    p
                    for p in [
                        ignore_rule_type.compile(line, Path(base_dir_path), ignore_file_path)
                        for line in lines_no_comments
                        if line
                    ]
                    if p is not None
                ]
                # evaluation order of patterns is important with negation
                # so that later patterns can override earlier patterns
                patterns = list(dict.fromkeys(patterns))

        dirs[:] = [subdir for subdir in dirs if not ignore_rule_type.match(Path(root) / subdir, patterns)]

        # explicit loop for infinite recursion detection since we are following symlinks in this walk
        for sd in dirs:
            dirpath = (Path(root) / sd).resolve()
            if dirpath in patterns_by_dir:
                raise RuntimeError(
                    "Detected recursive loop when walking DAG directory "
                    f"{base_dir_path}: {dirpath} has appeared more than once."
                )
            patterns_by_dir.update({dirpath: patterns.copy()})

        for file in files:
            if file != ignore_file_name:
                abs_file_path = Path(root) / file
                if not ignore_rule_type.match(abs_file_path, patterns):
                    yield str(abs_file_path)

def find_path_from_directory(
    base_dir_path: str | os.PathLike[str],
    ignore_file_name: str,
    ignore_file_syntax: str = conf.get_mandatory_value("core", "DAG_IGNORE_FILE_SYNTAX", fallback="regexp"),
) -> Generator[str, None, None]:
    """Recursively search the base path for a list of file paths that should not be ignored.

    :param base_dir_path: the base path to be searched
    :param ignore_file_name: the file name that specifies the patterns of files/dirs to be ignored
    :param ignore_file_syntax: the syntax of patterns in the ignore file: regexp or glob

    :return: a generator of file paths.
    """
    if ignore_file_syntax == "glob":
        return _find_path_from_directory(base_dir_path, ignore_file_name, _GlobIgnoreRule)
    elif ignore_file_syntax == "regexp" or not ignore_file_syntax:
        return _find_path_from_directory(base_dir_path, ignore_file_name, _RegexpIgnoreRule)
    else:
        raise ValueError(f"Unsupported ignore_file_syntax: {ignore_file_syntax}")

def list_py_file_paths(
    directory: str | os.PathLike[str] | None,
    safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE", fallback=True),
    include_examples: bool | None = None,
) -> list[str]:
    """Traverse a directory and look for Python files.

    :param directory: the directory to traverse
    :param safe_mode: whether to use a heuristic to determine whether a file contains Airflow
        DAG definitions. If not provided, use the core.DAG_DISCOVERY_SAFE_MODE configuration
        setting. If not set, default to safe.
    :param include_examples: include example DAGs
    :return: a list of paths to Python files in the specified directory
    """
    if include_examples is None:
        include_examples = conf.getboolean("core", "LOAD_EXAMPLES")
    file_paths: list[str] = []
    if directory is None:
        file_paths = []
    elif os.path.isfile(directory):
        file_paths = [str(directory)]
    elif os.path.isdir(directory):
        file_paths.extend(find_dag_file_paths(directory, safe_mode))
    if include_examples:
        from airflow import example_dags

        example_dag_folder = next(iter(example_dags.__path__))
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, include_examples=False))
    return file_paths

def find_dag_file_paths(directory: str | os.PathLike[str], safe_mode: bool) -> list[str]:
    """Find file paths of all DAG files."""
    file_paths = []

    for file_path in find_path_from_directory(directory, ".airflowignore"):
        path = Path(file_path)
        try:
            if path.is_file() and (path.suffix == ".py" or zipfile.is_zipfile(path)):
                if might_contain_dag(file_path, safe_mode):
                    file_paths.append(file_path)
        except Exception:
            log.exception("Error while examining %s", file_path)

    return file_paths

def might_contain_dag(file_path: str, safe_mode: bool, zip_file: zipfile.ZipFile | None = None) -> bool:
    """
    Check whether a Python file contains Airflow DAGs.

    When safe_mode is off (with False value), this function always returns True.

    If might_contain_dag_callable isn't specified, it uses the Airflow default heuristic.
    """
    if not safe_mode:
        return True
    might_contain_dag_callable = conf.getimport(
        "core",
        "might_contain_dag_callable",
        fallback="airflow.utils.file.might_contain_dag_via_default_heuristic",
    )
    return might_contain_dag_callable(file_path=file_path, zip_file=zip_file)

def might_contain_dag_via_default_heuristic(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
    """
    Heuristic that guesses whether a Python file contains an Airflow DAG definition.

    :param file_path: Path to the file to be checked.
    :param zip_file: if passed, checks the archive. Otherwise, check local filesystem.
    :return: True, if file might contain DAGs.
    """
    if zip_file:
        with zip_file.open(file_path) as current_file:
            content = current_file.read()
    else:
        if zipfile.is_zipfile(file_path):
            return True
        with open(file_path, "rb") as dag_file:
            content = dag_file.read()
    content = content.lower()
    return all(s in content for s in (b"dag", b"airflow"))

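A minimal sketch of the heuristic in action, using a throwaway file (the content and path are made up for illustration):

    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as f:
        f.write(b"from airflow import DAG  # this file mentions both keywords")

    # Both b"dag" and b"airflow" appear in the lowercased content, so the
    # heuristic reports that the file might contain a DAG.
    assert might_contain_dag_via_default_heuristic(f.name) is True
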
def iter_airflow_imports(file_path: str) -> Generator[str, None, None]:
    """Find Airflow modules imported in the given file."""
    try:
        parsed = ast.parse(Path(file_path).read_bytes())
    except Exception:
        return
    for m in _find_imported_modules(parsed):
        if m.startswith("airflow."):
            yield m

def get_unique_dag_module_name(file_path: str) -> str:
    """Return a unique module name in the format unusual_prefix_{sha1 of module's file path}_{original module name}."""
    if isinstance(file_path, str):
        path_hash = hashlib.sha1(file_path.encode("utf-8")).hexdigest()
        org_mod_name = Path(file_path).stem
        return MODIFIED_DAG_MODULE_NAME.format(path_hash=path_hash, module_name=org_mod_name)
    raise ValueError("file_path should be a string to generate unique module name")

def md5(__string: ReadableBuffer = b"") -> hashlib._Hash:
    """
    Safely allow calling the ``hashlib.md5`` function when ``usedforsecurity`` is disabled in configuration.

    :param __string: The data to hash. Defaults to an empty byte string.
    :return: The hashed value.
    """
    if sys.version_info >= (3, 9):
        return hashlib.md5(__string, usedforsecurity=False)  # type: ignore
    return hashlib.md5(__string)

def validate_key(k: str, max_length: int = 250):
    """Validate value used as a key."""
    if not isinstance(k, str):
        raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
    if len(k) > max_length:
        raise AirflowException(f"The key has to be less than {max_length} characters")
    if not KEY_REGEX.match(k):
        raise AirflowException(
            f"The key {k!r} has to be made of alphanumeric characters, dashes, "
            f"dots and underscores exclusively"
        )

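For illustration (assuming KEY_REGEX is Airflow's usual pattern allowing word characters, dashes and dots), a valid key passes silently while an invalid one raises:

    >>> validate_key("my_task-1.retry")  # returns None, no exception
    >>> validate_key("my task")  # the space is rejected by KEY_REGEX
    Traceback (most recent call last):
        ...
    airflow.exceptions.AirflowException: The key 'my task' has to be made of alphanumeric characters, dashes, dots and underscores exclusively
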
def validate_group_key(k: str, max_length: int = 200):
    """Validate value used as a group key."""
    if not isinstance(k, str):
        raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
    if len(k) > max_length:
        raise AirflowException(f"The key has to be less than {max_length} characters")
    if not GROUP_KEY_REGEX.match(k):
        raise AirflowException(
            f"The key {k!r} has to be made of alphanumeric characters, dashes and underscores exclusively"
        )

def alchemy_to_dict(obj: Any) -> dict | None:
    """Transform a SQLAlchemy model instance into a dictionary."""
    if not obj:
        return None
    output = {}
    for col in obj.__table__.columns:
        value = getattr(obj, col.name)
        if isinstance(value, datetime):
            value = value.isoformat()
        output[col.name] = value
    return output

def ask_yesno(question: str, default: bool | None = None) -> bool:
    """Get a yes or no answer from the user."""
    yes = {"yes", "y"}
    no = {"no", "n"}

    print(question)
    while True:
        choice = input().lower()
        if choice == "" and default is not None:
            return default
        if choice in yes:
            return True
        if choice in no:
            return False
        print("Please respond with y/yes or n/no.")

def prompt_with_timeout(question: str, timeout: int, default: bool | None = None) -> bool:
    """Ask the user a question and timeout if they don't respond."""

    def handler(signum, frame):
        raise AirflowException(f"Timeout {timeout}s reached")

    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout)
    try:
        return ask_yesno(question, default)
    finally:
        signal.alarm(0)

def is_container(obj: Any) -> bool:
    """Test if an object is a container (iterable) but not a string."""
    if isinstance(obj, Proxy):
        # Proxy of any object is considered a container because it implements __iter__
        # to forward the call to the lazily initialized object
        # Unwrap Proxy before checking __iter__ to evaluate the proxied object
        obj = obj.__wrapped__

    return hasattr(obj, "__iter__") and not isinstance(obj, str)

def as_tuple(obj: Any) -> tuple:
    """Return obj as a tuple if obj is a container, otherwise return a tuple containing obj."""
    if is_container(obj):
        return tuple(obj)
    else:
        return tuple([obj])

def chunks(items: list[T], chunk_size: int) -> Generator[list[T], None, None]:
    """Yield successive chunks of a given size from a list of items."""
    if chunk_size <= 0:
        raise ValueError("Chunk size must be a positive integer")
    for i in range(0, len(items), chunk_size):
        yield items[i : i + chunk_size]

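For example, a list of five items in chunks of two yields two full chunks and one remainder:

    >>> list(chunks([1, 2, 3, 4, 5], 2))
    [[1, 2], [3, 4], [5]]
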
def reduce_in_chunks(fn: Callable[[S, list[T]], S], iterable: list[T], initializer: S, chunk_size: int = 0):
    """Split the list of items into chunks of a given size and pass each chunk through the reducer."""
    if not iterable:
        return initializer
    if chunk_size == 0:
        chunk_size = len(iterable)
    return reduce(fn, chunks(iterable, chunk_size), initializer)

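A small illustration: summing a list two elements at a time. With the default chunk_size=0, the whole list is passed to the reducer as a single chunk.

    >>> reduce_in_chunks(lambda acc, chunk: acc + sum(chunk), [1, 2, 3, 4, 5], 0, chunk_size=2)
    15
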
def as_flattened_list(iterable: Iterable[Iterable[T]]) -> list[T]:
    """
    Return an iterable with one level flattened.

    >>> as_flattened_list((("blue", "red"), ("green", "yellow", "pink")))
    ['blue', 'red', 'green', 'yellow', 'pink']
    """
    return [e for i in iterable for e in i]

def parse_template_string(template_string: str) -> tuple[str | None, jinja2.Template | None]:
    """Parse Jinja template string."""
    import jinja2

    if "{{" in template_string:  # jinja mode
        return None, jinja2.Template(template_string)
    else:
        return template_string, None

def render_log_filename(ti: TaskInstance, try_number, filename_template) -> str:
    """
    Given task instance, try_number, filename_template, return the rendered log filename.

    :param ti: task instance
    :param try_number: try_number of the task
    :param filename_template: filename template, which can be jinja template or
        python string template
    """
    filename_template, filename_jinja_template = parse_template_string(filename_template)
    if filename_jinja_template:
        jinja_context = ti.get_template_context()
        jinja_context["try_number"] = try_number
        return render_template_to_string(filename_jinja_template, jinja_context)

    return filename_template.format(
        dag_id=ti.dag_id,
        task_id=ti.task_id,
        execution_date=ti.execution_date.isoformat(),
        try_number=try_number,
    )

def convert_camel_to_snake(camel_str: str) -> str:
    """Convert CamelCase to snake_case."""
    return CAMELCASE_TO_SNAKE_CASE_REGEX.sub(r"_\1", camel_str).lower()

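The module-level regex is not shown above; a plausible stand-alone sketch follows (the pattern here is an assumption: it matches runs of capitals anywhere except the start of the string):

    import re

    # Assumed definition of the module-level constant, for illustration only.
    CAMELCASE_TO_SNAKE_CASE_REGEX = re.compile(r"(?!^)([A-Z]+)")

    def convert_camel_to_snake(camel_str: str) -> str:
        return CAMELCASE_TO_SNAKE_CASE_REGEX.sub(r"_\1", camel_str).lower()

    print(convert_camel_to_snake("LocalTaskJob"))  # -> local_task_job
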
def merge_dicts(dict1: dict, dict2: dict) -> dict:
    """
    Merge two dicts recursively, returning new dict (input dict is not mutated).

    Lists are not concatenated. Items in dict2 overwrite those also found in dict1.
    """
    merged = dict1.copy()
    for k, v in dict2.items():
        if k in merged and isinstance(v, dict):
            merged[k] = merge_dicts(merged.get(k, {}), v)
        else:
            merged[k] = v
    return merged

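For example, nested dicts are merged key by key, while scalar values from dict2 simply win:

    >>> merge_dicts({"a": 1, "b": {"x": 1, "y": 2}}, {"b": {"y": 20, "z": 30}, "c": 3})
    {'a': 1, 'b': {'x': 1, 'y': 20, 'z': 30}, 'c': 3}
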
def partition(pred: Callable[[T], bool], iterable: Iterable[T]) -> tuple[Iterable[T], Iterable[T]]:
    """Use a predicate to partition entries into false entries and true entries."""
    iter_1, iter_2 = itertools.tee(iterable)
    return itertools.filterfalse(pred, iter_1), filter(pred, iter_2)

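For example, splitting a range by parity (note the order: false entries first, then true entries):

    >>> evens, odds = partition(lambda x: x % 2, range(6))
    >>> list(evens), list(odds)
    ([0, 2, 4], [1, 3, 5])
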
def chain(*args, **kwargs):
    """Use `airflow.models.baseoperator.chain`, this function is deprecated."""
    warnings.warn(
        "This function is deprecated. Please use `airflow.models.baseoperator.chain`.",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )
    return import_string("airflow.models.baseoperator.chain")(*args, **kwargs)

def cross_downstream(*args, **kwargs):
    """Use `airflow.models.baseoperator.cross_downstream`, this function is deprecated."""
    warnings.warn(
        "This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`.",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )
    return import_string("airflow.models.baseoperator.cross_downstream")(*args, **kwargs)

def build_airflow_url_with_query(query: dict[str, Any]) -> str:
    """
    Build airflow url using base_url and default_view and provided query.

    For example:
    http://0.0.0.0:8000/base/graph?dag_id=my-task&root=&execution_date=2020-10-27T10%3A59%3A25.615587
    """
    import flask

    view = conf.get_mandatory_value("webserver", "dag_default_view").lower()
    return flask.url_for(f"Airflow.{view}", **query)

def render_template(template: Any, context: MutableMapping[str, Any], *, native: bool) -> Any:
    """Render a Jinja2 template with given Airflow context.

    The default implementation of ``jinja2.Template.render()`` converts the
    input context into dict eagerly many times, which triggers deprecation
    messages in our custom context class. This takes the implementation apart
    and retains the context mapping without resolving it instead.

    :param template: A Jinja2 template to render.
    :param context: The Airflow task context to render the template with.
    :param native: If set to *True*, render the template into a native type. A
        DAG can enable this with ``render_template_as_native_obj=True``.
    :returns: The render result.
    """
    context = copy.copy(context)
    env = template.environment
    if template.globals:
        context.update((k, v) for k, v in template.globals.items() if k not in context)
    try:
        nodes = template.root_render_func(env.context_class(env, context, template.name, template.blocks))
    except Exception:
        env.handle_exception()  # Rewrite traceback to point to the template.
    if native:
        import jinja2.nativetypes

        return jinja2.nativetypes.native_concat(nodes)
    return "".join(nodes)

def render_template_to_string(template: jinja2.Template, context: Context) -> str:
    """Shorthand to ``render_template(native=False)`` with better typing support."""
    return render_template(template, cast(MutableMapping[str, Any], context), native=False)

def render_template_as_native(template: jinja2.Template, context: Context) -> Any:
    """Shorthand to ``render_template(native=True)`` with better typing support."""
    return render_template(template, cast(MutableMapping[str, Any], context), native=True)

def exactly_one(*args) -> bool:
    """
    Return True if exactly one of *args is "truthy", and False otherwise.

    If user supplies an iterable, we raise ValueError and force them to unpack.
    """
    if is_container(args[0]):
        raise ValueError(
            "Not supported for iterable args. Use `*` to unpack your iterable in the function call."
        )
    return sum(map(bool, args)) == 1

def at_most_one(*args) -> bool:
    """
    Return True if at most one of *args is "truthy", and False otherwise.

    NOTSET is treated the same as None.

    If user supplies an iterable, we raise ValueError and force them to unpack.
    """

    def is_set(val):
        if val is NOTSET:
            return False
        else:
            return bool(val)

    return sum(map(is_set, args)) in (0, 1)

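For illustration (NOTSET here is the module's sentinel value, which at_most_one treats like None):

    >>> exactly_one(True, False, None)
    True
    >>> exactly_one("a", 1)
    False
    >>> at_most_one(None, False, NOTSET)
    True
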
def prune_dict(val: Any, mode="strict"):
    """
    Given dict ``val``, returns new dict based on ``val`` with all empty elements removed.

    What constitutes "empty" is controlled by the ``mode`` parameter.  If mode is 'strict'
    then only ``None`` elements will be removed.  If mode is ``truthy``, then element ``x``
    will be removed if ``bool(x) is False``.
    """

    def is_empty(x):
        if mode == "strict":
            return x is None
        elif mode == "truthy":
            return bool(x) is False
        raise ValueError("allowable values for `mode` include 'truthy' and 'strict'")

    if isinstance(val, dict):
        new_dict = {}
        for k, v in val.items():
            if is_empty(v):
                continue
            elif isinstance(v, (list, dict)):
                new_val = prune_dict(v, mode=mode)
                if not is_empty(new_val):
                    new_dict[k] = new_val
            else:
                new_dict[k] = v
        return new_dict
    elif isinstance(val, list):
        new_list = []
        for v in val:
            if is_empty(v):
                continue
            elif isinstance(v, (list, dict)):
                new_val = prune_dict(v, mode=mode)
                if not is_empty(new_val):
                    new_list.append(new_val)
            else:
                new_list.append(v)
        return new_list
    else:
        return val

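For example, the same input pruned in both modes; strict drops only None, while truthy also drops falsy values such as the empty string:

    >>> prune_dict({"a": None, "b": "", "c": {"d": None, "e": 1}})
    {'b': '', 'c': {'e': 1}}
    >>> prune_dict({"a": None, "b": "", "c": {"d": None, "e": 1}}, mode="truthy")
    {'c': {'e': 1}}
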
def prevent_duplicates(kwargs1: dict[str, Any], kwargs2: Mapping[str, Any], *, fail_reason: str) -> None:
    """Ensure *kwargs1* and *kwargs2* do not contain common keys.

    :raises TypeError: If common keys are found.
    """
    duplicated_keys = set(kwargs1).intersection(kwargs2)
    if not duplicated_keys:
        return
    if len(duplicated_keys) == 1:
        raise TypeError(f"{fail_reason} argument: {duplicated_keys.pop()}")
    duplicated_keys_display = ", ".join(sorted(duplicated_keys))
    raise TypeError(f"{fail_reason} arguments: {duplicated_keys_display}")

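For example, a single overlapping key produces a singular error message:

    >>> prevent_duplicates({"a": 1, "b": 2}, {"b": 3, "c": 4}, fail_reason="duplicate")
    Traceback (most recent call last):
        ...
    TypeError: duplicate argument: b
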
def import_string(dotted_path: str):
    """
    Import a dotted module path and return the attribute/class designated by the last name in the path.

    Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit(".", 1)
    except ValueError:
        raise ImportError(f"{dotted_path} doesn't look like a module path")

    module = import_module(module_path)

    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute/class')

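For example:

    >>> import os
    >>> import_string("os.path.join") is os.path.join
    True
    >>> import_string("no_dots_here")
    Traceback (most recent call last):
        ...
    ImportError: no_dots_here doesn't look like a module path
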
def qualname(o: object | Callable) -> str:
    """Convert an attribute/class/function to a string importable by ``import_string``."""
    if callable(o) and hasattr(o, "__module__") and hasattr(o, "__name__"):
        return f"{o.__module__}.{o.__name__}"

    cls = o

    if not isinstance(cls, type):  # instance or class
        cls = type(cls)

    name = cls.__qualname__
    module = cls.__module__

    if module and module != "__builtin__":
        return f"{module}.{name}"

    return name

def getfqdn(name=""):
    """
    Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.
    """
    name = name.strip()
    if not name or name == "0.0.0.0":
        name = socket.gethostname()
    try:
        addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
    except OSError:
        pass
    else:
        for addr in addrs:
            if addr[3]:
                name = addr[3]
                break
    return name

def get_host_ip_address():
    """Fetch host ip address."""
    return socket.gethostbyname(getfqdn())

def get_hostname():
    """Fetch the hostname using the callable from config or use `airflow.utils.net.getfqdn` as a fallback."""
    return conf.getimport("core", "hostname_callable", fallback="airflow.utils.net.getfqdn")()

def context_to_airflow_vars(context: Mapping[str, Any], in_env_var_format: bool = False) -> dict[str, str]:
    """
    Return values used to externally reconstruct relations between dags, dag_runs, tasks and task_instances.

    Given a context, this function provides a dictionary of values that can be used to
    externally reconstruct relations between dags, dag_runs, tasks and task_instances.
    Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if
    in_env_var_format is set to True.

    :param context: The context for the task_instance of interest.
    :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.
    :return: task_instance context as dict.
    """
    params = {}
    if in_env_var_format:
        name_format = "env_var_format"
    else:
        name_format = "default"

    task = context.get("task")
    task_instance = context.get("task_instance")
    dag_run = context.get("dag_run")

    ops = [
        (task, "email", "AIRFLOW_CONTEXT_DAG_EMAIL"),
        (task, "owner", "AIRFLOW_CONTEXT_DAG_OWNER"),
        (task_instance, "dag_id", "AIRFLOW_CONTEXT_DAG_ID"),
        (task_instance, "task_id", "AIRFLOW_CONTEXT_TASK_ID"),
        (task_instance, "execution_date", "AIRFLOW_CONTEXT_EXECUTION_DATE"),
        (task_instance, "try_number", "AIRFLOW_CONTEXT_TRY_NUMBER"),
        (dag_run, "run_id", "AIRFLOW_CONTEXT_DAG_RUN_ID"),
    ]

    context_params = settings.get_airflow_context_vars(context)
    for key, value in context_params.items():
        if not isinstance(key, str):
            raise TypeError(f"key <{key}> must be string")
        if not isinstance(value, str):
            raise TypeError(f"value of key <{key}> must be string, not {type(value)}")

        if in_env_var_format:
            if not key.startswith(ENV_VAR_FORMAT_PREFIX):
                key = ENV_VAR_FORMAT_PREFIX + key.upper()
        else:
            if not key.startswith(DEFAULT_FORMAT_PREFIX):
                key = DEFAULT_FORMAT_PREFIX + key
        params[key] = value

    for subject, attr, mapping_key in ops:
        _attr = getattr(subject, attr, None)
        if subject and _attr:
            mapping_value = AIRFLOW_VAR_NAME_FORMAT_MAPPING[mapping_key][name_format]
            if isinstance(_attr, str):
                params[mapping_value] = _attr
            elif isinstance(_attr, datetime):
                params[mapping_value] = _attr.isoformat()
            elif isinstance(_attr, list):
                # os env variable value needs to be string
                params[mapping_value] = ",".join(_attr)
            else:
                params[mapping_value] = str(_attr)

    return params

def determine_kwargs(
    func: Callable[..., Any],
    args: Collection[Any],
    kwargs: Mapping[str, Any],
) -> Mapping[str, Any]:
    """
    Inspect the signature of a callable to determine which kwargs need to be passed to the callable.

    :param func: The callable that you want to invoke
    :param args: The positional arguments that need to be passed to the callable, so we know how many to skip.
    :param kwargs: The keyword arguments that need to be filtered before passing to the callable.
    :return: A dictionary which contains the keyword arguments that are compatible with the callable.
    """
    return KeywordParameters.determine(func, args, kwargs).unpacking()

def make_kwargs_callable(func: Callable[..., R]) -> Callable[..., R]:
    """
    Create a new callable that only forwards necessary arguments from any provided input.

    Make a new callable that can accept any number of positional or keyword arguments
    but only forwards those required by the given callable func.
    """
    import functools

    @functools.wraps(func)
    def kwargs_func(*args, **kwargs):
        kwargs = determine_kwargs(func, args, kwargs)
        return func(*args, **kwargs)

    return kwargs_func

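A sketch of the intended behavior with a hypothetical function: keyword arguments the wrapped callable cannot accept are filtered out rather than raising TypeError.

    def greet(name):
        return f"Hello, {name}!"

    kwargs_friendly_greet = make_kwargs_callable(greet)
    # "run_id" and "ds" are dropped by determine_kwargs; only "name" is forwarded.
    print(kwargs_friendly_greet(name="Airflow", run_id="manual__1", ds="2024-01-01"))
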
def setup_event_handlers(engine):
    """Set up event handlers."""
    from airflow.models import import_all_models

    event.listen(get_orm_mapper(), "before_configured", import_all_models, once=True)

    @event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        connection_record.info["pid"] = os.getpid()

    if engine.dialect.name == "sqlite":

        @event.listens_for(engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON")
            cursor.close()

    # this ensures coherence in mysql when storing datetimes (not required for postgres)
    if engine.dialect.name == "mysql":

        @event.listens_for(engine, "connect")
        def set_mysql_timezone(dbapi_connection, connection_record):
            cursor = dbapi_connection.cursor()
            cursor.execute("SET time_zone = '+00:00'")
            cursor.close()

    @event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        pid = os.getpid()
        if connection_record.info["pid"] != pid:
            connection_record.connection = connection_proxy.connection = None
            raise exc.DisconnectionError(
                f"Connection record belongs to pid {connection_record.info['pid']}, "
                f"attempting to check out in pid {pid}"
            )

    if conf.getboolean("debug", "sqlalchemy_stats", fallback=False):

        @event.listens_for(engine, "before_cursor_execute")
        def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
            conn.info.setdefault("query_start_time", []).append(time.perf_counter())

        @event.listens_for(engine, "after_cursor_execute")
        def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
            total = time.perf_counter() - conn.info["query_start_time"].pop()
            file_name = [
                f"'{f.name}':{f.filename}:{f.lineno}"
                for f in traceback.extract_stack()
                if "sqlalchemy" not in f.filename
            ][-1]
            stack = [f for f in traceback.extract_stack() if "sqlalchemy" not in f.filename]
            stack_info = ">".join([f"{f.filename.rpartition('/')[-1]}:{f.name}" for f in stack][-3:])
            conn.info.setdefault("query_start_time", []).append(time.monotonic())
            log.info(
                "@SQLALCHEMY %s |$ %s |$ %s |$ %s ",
                total,
                file_name,
                stack_info,
                statement.replace("\n", " "),
            )

def is_tty():
    """Check if stdout is connected to a tty(-like) device, i.e. associated with a terminal."""
    if not hasattr(sys.stdout, "isatty"):
        return False
    return sys.stdout.isatty()

def is_terminal_support_colors() -> bool:
    """Try to determine if the current terminal supports colors."""
    if sys.platform == "win32":
        return False
    if not is_tty():
        return False
    if "COLORTERM" in os.environ:
        return True
    term = os.environ.get("TERM", "dumb").lower()
    if term in ("xterm", "linux") or "color" in term:
        return True
    return False

def get_airflow_git_version():
    """Return the git commit hash representing the current version of the application."""
    git_version = None
    try:
        git_version = str(pkgutil.get_data("airflow", "git_version"), encoding="UTF-8")
    except Exception as e:
        log.debug(e)

    return git_version

def getuser() -> str:
    """
    Get the username of the current user, or error with a nice error message if there's no current user.

    We don't want to fall back to os.getuid() because not having a username
    probably means the rest of the user environment is wrong (e.g. no $HOME).
    Explicit failure is better than silently trying to work badly.
    """
    try:
        return getpass.getuser()
    except KeyError:
        # Inner import to avoid circular import
        from airflow.exceptions import AirflowConfigException

        raise AirflowConfigException(
            "The user that Airflow is running as has no username; you must run "
            "Airflow as a full user, with a username and home directory, "
            "in order for it to function properly."
        )

def reap_process_group(
    process_group_id: int,
    logger,
    sig: signal.Signals = signal.SIGTERM,
    timeout: int = DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM,
) -> dict[int, int]:
    """
    Send sig (SIGTERM) to the process group of pid.

    Tries really hard to terminate all processes in the group (including grandchildren). Will send
    sig (SIGTERM) to the process group of pid. If any process is alive after timeout
    a SIGKILL will be sent.

    :param process_group_id: process group id to kill. The process that wants to create the group should run
        `airflow.utils.process_utils.set_new_process_group()` as the first command it executes which will
        set group id = process_id. Effectively the process that is the "root" of the group has pid = gid
        and all other processes in the group have different pids but the same gid (equal the pid of the
        root process)
    :param logger: log handler
    :param sig: signal type
    :param timeout: how much time a process has to terminate
    """
    returncodes = {}

    def on_terminate(p):
        logger.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode)
        returncodes[p.pid] = p.returncode

    def signal_procs(sig):
        if IS_WINDOWS:
            return
        try:
            logger.info("Sending the signal %s to group %s", sig, process_group_id)
            os.killpg(process_group_id, sig)
        except OSError as err_killpg:
            # If operation not permitted error is thrown due to run_as_user,
            # use sudo -n(--non-interactive) to kill the process
            if err_killpg.errno == errno.EPERM:
                subprocess.check_call(
                    ["sudo", "-n", "kill", "-" + str(int(sig))]
                    + [str(p.pid) for p in all_processes_in_the_group]
                )
            elif err_killpg.errno == errno.ESRCH:
                # There is a rare condition that the process has not managed yet to change its process
                # group. In this case os.killpg fails with ESRCH error
                # So we additionally send a kill signal to the process itself.
                logger.info(
                    "Sending the signal %s to process %s as process group is missing.", sig, process_group_id
                )
                try:
                    os.kill(process_group_id, sig)
                except OSError as err_kill:
                    if err_kill.errno == errno.EPERM:
                        subprocess.check_call(["sudo", "-n", "kill", "-" + str(process_group_id)])
                    else:
                        raise
            else:
                raise

    if not IS_WINDOWS and process_group_id == os.getpgid(0):
        raise RuntimeError("I refuse to kill myself")

    try:
        parent = psutil.Process(process_group_id)

        all_processes_in_the_group = parent.children(recursive=True)
        all_processes_in_the_group.append(parent)
    except psutil.NoSuchProcess:
        # The process already exited, but maybe its children haven't.
        all_processes_in_the_group = []
        for proc in psutil.process_iter():
            try:
                if os.getpgid(proc.pid) == process_group_id and proc.pid != 0:
                    all_processes_in_the_group.append(proc)
            except OSError:
                pass

    logger.info(
        "Sending %s to group %s. PIDs of all processes in the group: %s",
        sig,
        process_group_id,
        [p.pid for p in all_processes_in_the_group],
    )
    try:
        signal_procs(sig)
    except OSError as err:
        # No such process, which means there is no such process group - our job
        # is done
        if err.errno == errno.ESRCH:
            return returncodes

    _, alive = psutil.wait_procs(all_processes_in_the_group, timeout=timeout, callback=on_terminate)

    if alive:
        for proc in alive:
            logger.warning("process %s did not respond to SIGTERM. Trying SIGKILL", proc)

        try:
            signal_procs(signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise

        _, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
        if alive:
            for proc in alive:
                logger.error("Process %s (%s) could not be killed. Giving up.", proc, proc.pid)
    return returncodes

def execute_in_subprocess(cmd: list[str], cwd: str | None = None) -> None:
    """
    Execute a process and stream output to logger.

    :param cmd: command and arguments to run
    :param cwd: Current working directory passed to the Popen constructor
    """
    execute_in_subprocess_with_kwargs(cmd, cwd=cwd)

def execute_in_subprocess_with_kwargs(cmd: list[str], **kwargs) -> None:
    """
    Execute a process and stream output to logger.

    :param cmd: command and arguments to run

    All other keyword args will be passed directly to subprocess.Popen
    """
    log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
    with subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, close_fds=True, **kwargs
    ) as proc:
        log.info("Output:")
        if proc.stdout:
            with proc.stdout:
                for line in iter(proc.stdout.readline, b""):
                    log.info("%s", line.decode().rstrip())
        exit_code = proc.wait()
    if exit_code != 0:
        raise subprocess.CalledProcessError(exit_code, cmd)

def execute_interactive(cmd: list[str], **kwargs) -> None:
    """
    Run the new command as a subprocess.

    Runs the new command as a subprocess and ensures that the terminal's state is restored to its original
    state after the process is completed, e.g. if the subprocess hides the cursor, it will be restored after
    the process is completed.
    """
    log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))

    old_tty = termios.tcgetattr(sys.stdin)
    old_sigint_handler = signal.getsignal(signal.SIGINT)
    tty.setcbreak(sys.stdin.fileno())

    # open pseudo-terminal to interact with subprocess
    primary_fd, secondary_fd = pty.openpty()
    try:
        with subprocess.Popen(
            cmd,
            stdin=secondary_fd,
            stdout=secondary_fd,
            stderr=secondary_fd,
            universal_newlines=True,
            **kwargs,
        ) as proc:
            # ignore SIGINT in the parent process
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            while proc.poll() is None:
                readable_fbs, _, _ = select.select([sys.stdin, primary_fd], [], [], 0)
                if sys.stdin in readable_fbs:
                    input_data = os.read(sys.stdin.fileno(), 10240)
                    os.write(primary_fd, input_data)
                if primary_fd in readable_fbs:
                    output_data = os.read(primary_fd, 10240)
                    if output_data:
                        os.write(sys.stdout.fileno(), output_data)
    finally:
        # restore tty settings back
        signal.signal(signal.SIGINT, old_sigint_handler)
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)

def kill_child_processes_by_pids(pids_to_kill: list[int], timeout: int = 5) -> None:
    """
    Kill child processes of the current process.

    First, it sends the SIGTERM signal, and after the time specified by the `timeout` parameter,
    sends the SIGKILL signal, if the process is still alive.

    :param pids_to_kill: List of PIDs to be killed.
    :param timeout: The time to wait before sending the SIGKILL signal.
    """
    this_process = psutil.Process(os.getpid())
    # Only check child processes to ensure that we don't have a case
    # where we kill the wrong process because a child process died
    # but the PID got reused.
    child_processes = [
        x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
    ]

    # First try SIGTERM
    for child in child_processes:
        log.info("Terminating child PID: %s", child.pid)
        child.terminate()

    log.info("Waiting up to %s seconds for processes to exit...", timeout)
    try:
        psutil.wait_procs(
            child_processes, timeout=timeout, callback=lambda x: log.info("Terminated PID %s", x.pid)
        )
    except psutil.TimeoutExpired:
        log.debug("Ran out of time while waiting for processes to exit")

    # Then SIGKILL
    child_processes = [
        x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
    ]
    if child_processes:
        log.info("SIGKILL processes that did not terminate gracefully")
        for child in child_processes:
            log.info("Killing child PID: %s", child.pid)
            child.kill()
            child.wait()

Set environment variables in context. After leaving the context, it restores its original state. :param new_env_variables: Environment variables to set
def patch_environ(new_env_variables: dict[str, str]) -> Generator[None, None, None]: """ Set environment variables in context. After leaving the context, it restores its original state. :param new_env_variables: Environment variables to set """ current_env_state = {key: os.environ.get(key) for key in new_env_variables} os.environ.update(new_env_variables) try: yield finally: for key, old_value in current_env_state.items(): if old_value is None: if key in os.environ: del os.environ[key] else: os.environ[key] = old_value
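A usage sketch, assuming the generator above is wrapped with contextlib.contextmanager (the decorator is not captured in this listing):

import os

with patch_environ({"AIRFLOW_HOME": "/tmp/airflow_home"}):
    # The override is visible only inside the block.
    assert os.environ["AIRFLOW_HOME"] == "/tmp/airflow_home"
# On exit the variable is restored to its previous value, or removed
# entirely if it was not set before.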
Check if a pidfile already exists and if the process it references is still running. If the process is dead, the pidfile is removed. :param pid_file: path to the pidfile :param process_name: name used in the exception if the process is up and running
def check_if_pidfile_process_is_running(pid_file: str, process_name: str):
    """
    Check if a pidfile already exists and if the process it references is still running.

    If the process is dead, the pidfile is removed.

    :param pid_file: path to the pidfile
    :param process_name: name used in the exception if the process is up and running
    """
    pid_lock_file = PIDLockFile(path=pid_file)
    # If file exists
    if pid_lock_file.is_locked():
        # Read the pid
        pid = pid_lock_file.read_pid()
        if pid is None:
            return
        try:
            # Check if process is still running
            proc = psutil.Process(pid)
            if proc.is_running():
                raise AirflowException(f"The {process_name} is already running under PID {pid}.")
        except psutil.NoSuchProcess:
            # If process is dead remove the pidfile
            pid_lock_file.break_lock()
Try to set the current process to a new process group. That makes it easy to kill all sub-processes of this process at the OS level, rather than having to iterate over the child processes. If the current process was spawned by the system call ``exec()``, the current process group is kept.
def set_new_process_group() -> None:
    """Try to set the current process to a new process group.

    That makes it easy to kill all sub-processes of this process at the OS level,
    rather than having to iterate over the child processes.

    If the current process was spawned by the system call ``exec()``, the current
    process group is kept.
    """
    if os.getpid() == os.getsid(0):
        # If PID == SID, the process is a session leader and it is not possible
        # to change its process group
        return
    os.setpgid(0, 0)
Make sure that providers configuration is loaded before actually calling the decorated function. ProvidersManager initialization of configuration is relatively inexpensive - it walks through all providers' entrypoints, retrieves the provider_info and loads the config yaml parts of the provider info. Unlike initialization of hooks and operators it does not import any of the provider's code, so it can be run quickly by all commands that need to access providers configuration. We cannot even import ProvidersManager while importing any of the commands, so we need to locally import it here. We cannot initialize the configuration in settings/conf because of the way conf/settings are used internally - they are loaded while importing airflow, and we need to access the airflow configuration in the ProvidersManager initialization, so instead we opt for decorating all the methods that need it with this decorator. The decorator should be placed below @suppress_logs_and_warning but above @provide_session in order to avoid spoiling the output of formatted options with warnings or infos, and to be prepared that session creation might need some configuration defaults from the providers configuration. :param func: function to make sure that providers configuration is loaded before actually calling it
def providers_configuration_loaded(func: Callable[PS, RT]) -> Callable[PS, RT]:
    """
    Make sure that providers configuration is loaded before actually calling the decorated function.

    ProvidersManager initialization of configuration is relatively inexpensive - it walks through
    all providers' entrypoints, retrieves the provider_info and loads the config yaml parts of the
    provider info. Unlike initialization of hooks and operators it does not import any of the
    provider's code, so it can be run quickly by all commands that need to access providers
    configuration. We cannot even import ProvidersManager while importing any of the commands, so
    we need to locally import it here.

    We cannot initialize the configuration in settings/conf because of the way conf/settings are
    used internally - they are loaded while importing airflow, and we need to access the airflow
    configuration in the ProvidersManager initialization, so instead we opt for decorating all the
    methods that need it with this decorator.

    The decorator should be placed below @suppress_logs_and_warning but above @provide_session in
    order to avoid spoiling the output of formatted options with warnings or infos, and to be
    prepared that session creation might need some configuration defaults from the providers
    configuration.

    :param func: function to make sure that providers configuration is loaded before actually
        calling it
    """

    @wraps(func)
    def wrapped_function(*args, **kwargs) -> RT:
        from airflow.providers_manager import ProvidersManager

        ProvidersManager().initialize_providers_configuration()
        return func(*args, **kwargs)

    return wrapped_function
Create a virtual environment and install the additional python packages. :param venv_directory: The path to the directory where the environment will be created. :param python_bin: Path to the Python executable. :param system_site_packages: Whether to include system_site_packages in your virtualenv. See virtualenv documentation for more information. :param requirements: List of additional python packages. :param requirements_file_path: Path to the ``requirements.txt`` file. :param pip_install_options: A list of pip install options when installing requirements. See ``pip install -h`` for available options. :param index_urls: An optional list of index urls to load Python packages from. If not provided, the system pip conf will be used to source packages from. :return: Path to a binary file with Python in a virtual environment.
def prepare_virtualenv(
    venv_directory: str,
    python_bin: str,
    system_site_packages: bool,
    requirements: list[str] | None = None,
    requirements_file_path: str | None = None,
    pip_install_options: list[str] | None = None,
    index_urls: list[str] | None = None,
) -> str:
    """
    Create a virtual environment and install the additional python packages.

    :param venv_directory: The path to the directory where the environment will be created.
    :param python_bin: Path to the Python executable.
    :param system_site_packages: Whether to include system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
    :param requirements: List of additional python packages.
    :param requirements_file_path: Path to the ``requirements.txt`` file.
    :param pip_install_options: A list of pip install options when installing requirements.
        See ``pip install -h`` for available options.
    :param index_urls: An optional list of index urls to load Python packages from.
        If not provided, the system pip conf will be used to source packages from.
    :return: Path to a binary file with Python in a virtual environment.
    """
    # Validate mutually exclusive arguments before any side effects.
    if requirements is not None and requirements_file_path is not None:
        raise ValueError("Either requirements OR requirements_file_path has to be passed, but not both")

    if pip_install_options is None:
        pip_install_options = []

    if index_urls is not None:
        _generate_pip_conf(Path(venv_directory) / "pip.conf", index_urls)

    virtualenv_cmd = _generate_virtualenv_cmd(venv_directory, python_bin, system_site_packages)
    execute_in_subprocess(virtualenv_cmd)

    pip_cmd = None
    if requirements:
        pip_cmd = _generate_pip_install_cmd_from_list(venv_directory, requirements, pip_install_options)
    if requirements_file_path:
        pip_cmd = _generate_pip_install_cmd_from_file(
            venv_directory, requirements_file_path, pip_install_options
        )

    if pip_cmd:
        execute_in_subprocess(pip_cmd)

    return f"{venv_directory}/bin/python"
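A usage sketch, assuming the helper is in scope; the target directory and the pinned package below are illustrative only:

import sys

python_bin = prepare_virtualenv(
    venv_directory="/tmp/example_venv",   # hypothetical path
    python_bin=sys.executable,
    system_site_packages=False,
    requirements=["requests==2.31.0"],    # illustrative pin
)
# python_bin now points at /tmp/example_venv/bin/python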
Render the python script to a file to execute in the virtual environment. :param jinja_context: The jinja context variables to unpack and replace with its placeholders in the template file. :param filename: The name of the file to dump the rendered script to. :param render_template_as_native_obj: If ``True``, rendered Jinja template would be converted to a native Python object
def write_python_script( jinja_context: dict, filename: str, render_template_as_native_obj: bool = False, ): """ Render the python script to a file to execute in the virtual environment. :param jinja_context: The jinja context variables to unpack and replace with its placeholders in the template file. :param filename: The name of the file to dump the rendered script to. :param render_template_as_native_obj: If ``True``, rendered Jinja template would be converted to a native Python object """ template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname(__file__)) template_env: jinja2.Environment if render_template_as_native_obj: template_env = jinja2.nativetypes.NativeEnvironment( loader=template_loader, undefined=jinja2.StrictUndefined ) else: template_env = jinja2.Environment( loader=template_loader, undefined=jinja2.StrictUndefined, autoescape=select_autoescape(["html", "xml"]), ) template = template_env.get_template("python_virtualenv_script.jinja2") template.stream(**jinja_context).dump(filename)
Return a Tenacity ``Retrying`` object with project-specific defaults.
def run_with_db_retries(max_retries: int = MAX_DB_RETRIES, logger: logging.Logger | None = None, **kwargs):
    """Return a Tenacity ``Retrying`` object with project-specific defaults."""
    import tenacity

    # Default kwargs
    retry_kwargs = dict(
        retry=tenacity.retry_if_exception_type(exception_types=(OperationalError, DBAPIError)),
        wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
        stop=tenacity.stop_after_attempt(max_retries),
        reraise=True,
        **kwargs,
    )
    if logger and isinstance(logger, logging.Logger):
        retry_kwargs["before_sleep"] = tenacity.before_sleep_log(logger, logging.DEBUG, True)

    return tenacity.Retrying(**retry_kwargs)
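A minimal sketch of the intended call pattern; `log` and `session` are assumed to exist in the caller's scope:

from sqlalchemy import text

for attempt in run_with_db_retries(logger=log):
    with attempt:
        # Any OperationalError/DBAPIError raised here is retried with
        # randomized exponential backoff, up to MAX_DB_RETRIES attempts.
        session.execute(text("SELECT 1"))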
Retry functions in case of ``OperationalError`` from DB. It should not be used with ``@provide_session``.
def retry_db_transaction(_func: Callable | None = None, *, retries: int = MAX_DB_RETRIES, **retry_kwargs): """ Retry functions in case of ``OperationalError`` from DB. It should not be used with ``@provide_session``. """ def retry_decorator(func: Callable) -> Callable: # Get Positional argument for 'session' func_params = signature(func).parameters try: # func_params is an ordered dict -- this is the "recommended" way of getting the position session_args_idx = tuple(func_params).index("session") except ValueError: raise ValueError(f"Function {func.__qualname__} has no `session` argument") # We don't need this anymore -- ensure we don't keep a reference to it by mistake del func_params @functools.wraps(func) def wrapped_function(*args, **kwargs): logger = args[0].log if args and hasattr(args[0], "log") else logging.getLogger(func.__module__) # Get session from args or kwargs if "session" in kwargs: session = kwargs["session"] elif len(args) > session_args_idx: session = args[session_args_idx] else: raise TypeError(f"session is a required argument for {func.__qualname__}") for attempt in run_with_db_retries(max_retries=retries, logger=logger, **retry_kwargs): with attempt: logger.debug( "Running %s with retries. Try %d of %d", func.__qualname__, attempt.retry_state.attempt_number, retries, ) try: return func(*args, **kwargs) except OperationalError: session.rollback() raise return wrapped_function # Allow using decorator with and without arguments if _func is None: return retry_decorator else: return retry_decorator(_func)
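A sketch of the intended application; the class, attribute, and method names are hypothetical — the only hard requirement from the decorator is a `session` argument in the signature:

@retry_db_transaction
def _set_dag_run_state(self, state, session=None):
    # On OperationalError the decorator rolls the session back and
    # retries the whole call, up to MAX_DB_RETRIES attempts.
    self.dag_run.state = state
    session.merge(self.dag_run)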
Start an HTTP server to serve the scheduler health check.
def serve_health_check():
    """Start an HTTP server to serve the scheduler health check."""
    health_check_host = conf.get("scheduler", "SCHEDULER_HEALTH_CHECK_SERVER_HOST")
    health_check_port = conf.getint("scheduler", "SCHEDULER_HEALTH_CHECK_SERVER_PORT")
    httpd = HTTPServer((health_check_host, health_check_port), HealthServer)
    httpd.serve_forever()
Get the count of records against dttm filter and states. :param dttm_filter: date time filter for execution date :param external_task_ids: The list of task_ids :param external_task_group_id: The ID of the external task group :param external_dag_id: The ID of the external DAG. :param states: task or dag states :param session: airflow session object
def _get_count( dttm_filter, external_task_ids, external_task_group_id, external_dag_id, states, session: Session = NEW_SESSION, ) -> int: """ Get the count of records against dttm filter and states. :param dttm_filter: date time filter for execution date :param external_task_ids: The list of task_ids :param external_task_group_id: The ID of the external task group :param external_dag_id: The ID of the external DAG. :param states: task or dag states :param session: airflow session object """ TI = TaskInstance DR = DagRun if not dttm_filter: return 0 if external_task_ids: count = ( session.scalar( _count_query(TI, states, dttm_filter, external_dag_id, session).filter( TI.task_id.in_(external_task_ids) ) ) ) / len(external_task_ids) elif external_task_group_id: external_task_group_task_ids = _get_external_task_group_task_ids( dttm_filter, external_task_group_id, external_dag_id, session ) if not external_task_group_task_ids: count = 0 else: count = ( session.scalar( _count_query(TI, states, dttm_filter, external_dag_id, session).filter( tuple_in_condition((TI.task_id, TI.map_index), external_task_group_task_ids) ) ) ) / len(external_task_group_task_ids) else: count = session.scalar(_count_query(DR, states, dttm_filter, external_dag_id, session)) return cast(int, count)
Build a query that counts records matching the dttm filter and states. :param model: The SQLAlchemy model representing the relevant table. :param states: task or dag states :param dttm_filter: date time filter for execution date :param external_dag_id: The ID of the external DAG. :param session: airflow session object
def _count_query(model, states, dttm_filter, external_dag_id, session: Session) -> Query:
    """
    Build a query that counts records matching the dttm filter and states.

    :param model: The SQLAlchemy model representing the relevant table.
    :param states: task or dag states
    :param dttm_filter: date time filter for execution date
    :param external_dag_id: The ID of the external DAG.
    :param session: airflow session object
    """
    query = select(func.count()).filter(
        model.dag_id == external_dag_id, model.state.in_(states), model.execution_date.in_(dttm_filter)
    )
    return query
Get the (task_id, map_index) pairs of the external task group's tasks matching the dttm filter. :param dttm_filter: date time filter for execution date :param external_task_group_id: The ID of the external task group :param external_dag_id: The ID of the external DAG. :param session: airflow session object
def _get_external_task_group_task_ids(dttm_filter, external_task_group_id, external_dag_id, session):
    """
    Get the (task_id, map_index) pairs of the external task group's tasks matching the dttm filter.

    :param dttm_filter: date time filter for execution date
    :param external_task_group_id: The ID of the external task group
    :param external_dag_id: The ID of the external DAG.
    :param session: airflow session object
    """
    refreshed_dag_info = DagBag(read_dags_from_db=True).get_dag(external_dag_id, session)
    task_group = refreshed_dag_info.task_group_dict.get(external_task_group_id)

    if task_group:
        group_tasks = session.scalars(
            select(TaskInstance).filter(
                TaskInstance.dag_id == external_dag_id,
                TaskInstance.task_id.in_(task.task_id for task in task_group),
                TaskInstance.execution_date.in_(dttm_filter),
            )
        )

        return [(t.task_id, t.map_index) for t in group_tasks]

    # returning default task_id as group_id itself, this will avoid any failure in case of
    # 'check_existence=False' and will fail on timeout
    return [(external_task_group_id, -1)]
Serve logs generated by Worker.
def serve_logs(port=None):
    """Serve logs generated by Worker."""
    setproctitle("airflow serve-logs")
    wsgi_app = create_app()

    port = port or conf.getint("logging", "WORKER_LOG_SERVER_PORT")

    # If dual-stack is available and IPV6_V6ONLY is not enabled on the socket,
    # binding to IPv6 will also bind to IPv4 automatically.
    if getattr(socket, "has_dualstack_ipv6", lambda: False)():
        bind_option = GunicornOption("bind", f"[::]:{port}")
    else:
        bind_option = GunicornOption("bind", f"0.0.0.0:{port}")

    options = [bind_option, GunicornOption("workers", 2)]
    StandaloneGunicornApplication(wsgi_app, options).run()
Context manager that creates and tears down a session.
def create_session() -> Generator[SASession, None, None]:
    """Context manager that creates and tears down a session."""
    if InternalApiConfig.get_use_internal_api():
        yield TracebackSession()
        return
    Session = getattr(settings, "Session", None)
    if Session is None:
        raise RuntimeError("Session must be set before!")
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
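A usage sketch, again assuming the generator is wrapped with contextlib.contextmanager; `some_row` stands in for any mapped ORM object:

with create_session() as session:
    session.add(some_row)  # some_row: hypothetical mapped object
# Committed on a clean exit; rolled back if the block raised.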
Find the index of the ``session`` argument in the function's signature.
def find_session_idx(func: Callable[PS, RT]) -> int:
    """Find the index of the ``session`` argument in the function's signature."""
    func_params = signature(func).parameters
    try:
        # func_params is an ordered dict -- this is the "recommended" way of getting the position
        session_args_idx = tuple(func_params).index("session")
    except ValueError:
        raise ValueError(f"Function {func.__qualname__} has no `session` argument") from None

    return session_args_idx
Provide a session if it isn't provided. If you want to reuse a session or run the function as part of a database transaction, pass it to the function; if not, this wrapper will create one and close it for you.
def provide_session(func: Callable[PS, RT]) -> Callable[PS, RT]:
    """
    Provide a session if it isn't provided.

    If you want to reuse a session or run the function as part of a database transaction,
    pass it to the function; if not, this wrapper will create one and close it for you.
    """
    session_args_idx = find_session_idx(func)

    @wraps(func)
    def wrapper(*args, **kwargs) -> RT:
        if "session" in kwargs or session_args_idx < len(args):
            return func(*args, **kwargs)
        else:
            with create_session() as session:
                return func(*args, session=session, **kwargs)

    return wrapper
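A sketch of the decorator in use; `DagRun` is the run model used elsewhere in this file, and the keyword default lets callers omit the session entirely:

@provide_session
def count_runs(dag_id: str, session=None) -> int:
    # Runs inside whichever session the wrapper supplies or the caller passes in.
    return session.query(DagRun).filter(DagRun.dag_id == dag_id).count()

count_runs("example_dag")  # a session is created, committed, and closed for us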
Convert pod to dict... but *safely*. When pod objects created with one k8s version are unpickled in a python env with a more recent k8s version (in which the object attrs may have changed), the unpickled object may raise an error because an attribute expected on the new object may not exist on the unpickled one. This function still converts the pod to a dict; the only difference is it populates missing attrs with None. You may compare with https://github.com/kubernetes-client/python/blob/5a96bbcbe21a552cc1f9cda13e0522fafb0dbac8/kubernetes/client/api_client.py#L202 If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is OpenAPI model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. :meta private:
def sanitize_for_serialization(obj: V1Pod):
    """
    Convert pod to dict... but *safely*.

    When pod objects created with one k8s version are unpickled in a python env with a more
    recent k8s version (in which the object attrs may have changed), the unpickled object may
    raise an error because an attribute expected on the new object may not exist on the
    unpickled one.

    This function still converts the pod to a dict; the only difference is it populates missing
    attrs with None. You may compare with
    https://github.com/kubernetes-client/python/blob/5a96bbcbe21a552cc1f9cda13e0522fafb0dbac8/kubernetes/client/api_client.py#L202

    If obj is None, return None.
    If obj is str, int, long, float, bool, return directly.
    If obj is datetime.datetime, datetime.date convert to string in iso8601 format.
    If obj is list, sanitize each element in the list.
    If obj is dict, return the dict.
    If obj is OpenAPI model, return the properties dict.

    :param obj: The data to serialize.
    :return: The serialized form of data.

    :meta private:
    """
    if obj is None:
        return None
    elif isinstance(obj, (float, bool, bytes, str, int)):
        return obj
    elif isinstance(obj, list):
        return [sanitize_for_serialization(sub_obj) for sub_obj in obj]
    elif isinstance(obj, tuple):
        return tuple(sanitize_for_serialization(sub_obj) for sub_obj in obj)
    elif isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()

    if isinstance(obj, dict):
        obj_dict = obj
    else:
        obj_dict = {
            obj.attribute_map[attr]: getattr(obj, attr)
            for attr, _ in obj.openapi_types.items()
            # below is the only line we change, and we just add default=None for getattr
            if getattr(obj, attr, None) is not None
        }

    return {key: sanitize_for_serialization(val) for key, val in obj_dict.items()}
Convert pod to json and back so that pod is safe. The pod_override in executor_config is a V1Pod object. Such objects created with one k8s version, when unpickled in an env with upgraded k8s version, may blow up when `to_dict` is called, because openapi client code gen calls getattr on all attrs in openapi_types for each object, and when new attrs are added to that list, getattr will fail. Here we re-serialize it to ensure it is not going to blow up. :meta private:
def ensure_pod_is_valid_after_unpickling(pod: V1Pod) -> V1Pod | None: """ Convert pod to json and back so that pod is safe. The pod_override in executor_config is a V1Pod object. Such objects created with one k8s version, when unpickled in an env with upgraded k8s version, may blow up when `to_dict` is called, because openapi client code gen calls getattr on all attrs in openapi_types for each object, and when new attrs are added to that list, getattr will fail. Here we re-serialize it to ensure it is not going to blow up. :meta private: """ try: # if to_dict works, the pod is fine pod.to_dict() return pod except AttributeError: pass try: from kubernetes.client.models.v1_pod import V1Pod except ImportError: return None if not isinstance(pod, V1Pod): return None try: try: from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator except ImportError: from airflow.kubernetes.pre_7_4_0_compatibility.pod_generator import ( # type: ignore[assignment] PodGenerator, ) # now we actually reserialize / deserialize the pod pod_dict = sanitize_for_serialization(pod) return PodGenerator.deserialize_model_dict(pod_dict) except Exception: return None
Specify *NULLS FIRST* to the column ordering. This is only done to Postgres, currently the only backend that supports it. Other databases do not need it since NULL values are considered lower than any other values, and appear first when the order is ASC (ascending).
def nulls_first(col, session: Session) -> Any:
    """Specify *NULLS FIRST* to the column ordering.

    This is only done to Postgres, currently the only backend that supports it.
    Other databases do not need it since NULL values are considered lower than
    any other values, and appear first when the order is ASC (ascending).
    """
    if session.bind.dialect.name == "postgresql":
        return nullsfirst(col)
    else:
        return col
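An illustrative ordering using models referenced elsewhere in this file:

# On Postgres this emits "ORDER BY start_date NULLS FIRST"; on other
# backends the bare column is returned and NULLs sort first anyway.
query = select(DagRun).order_by(nulls_first(DagRun.start_date, session))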
Apply with_for_update to the SQLAlchemy query if row-level locking is in use. This wrapper is needed so we don't use the syntax on unsupported database engines. In particular, MySQL (prior to 8.0) and MariaDB do not support row locking, and we neither support nor recommend running an HA scheduler there. If a user ignores this and tries anyway, everything will still work, just slightly slower in some circumstances. See https://jira.mariadb.org/browse/MDEV-13115 :param query: An SQLAlchemy Query object :param session: ORM Session :param nowait: If set to True, will pass NOWAIT to supported database backends. :param skip_locked: If set to True, will pass SKIP LOCKED to supported database backends. :param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc) :return: updated query
def with_row_locks(
    query: Query,
    session: Session,
    *,
    nowait: bool = False,
    skip_locked: bool = False,
    **kwargs,
) -> Query:
    """
    Apply with_for_update to the SQLAlchemy query if row-level locking is in use.

    This wrapper is needed so we don't use the syntax on unsupported database
    engines. In particular, MySQL (prior to 8.0) and MariaDB do not support
    row locking, and we neither support nor recommend running an HA scheduler
    there. If a user ignores this and tries anyway, everything will still work,
    just slightly slower in some circumstances.

    See https://jira.mariadb.org/browse/MDEV-13115

    :param query: An SQLAlchemy Query object
    :param session: ORM Session
    :param nowait: If set to True, will pass NOWAIT to supported database backends.
    :param skip_locked: If set to True, will pass SKIP LOCKED to supported database backends.
    :param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
    :return: updated query
    """
    dialect = session.bind.dialect

    # Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it.
    if not USE_ROW_LEVEL_LOCKING:
        return query
    if dialect.name == "mysql" and not dialect.supports_for_update_of:
        return query
    if nowait:
        kwargs["nowait"] = True
    if skip_locked:
        kwargs["skip_locked"] = True
    return query.with_for_update(**kwargs)
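A hedged example of guarding a scheduler-critical query. The annotation says Query, but SQLAlchemy 1.4-style Select objects expose with_for_update as well:

# Lock matching rows where supported; on MySQL < 8 / MariaDB the query is
# returned unchanged. skip_locked lets a concurrent scheduler pass over
# rows another instance already holds.
runs = session.scalars(
    with_row_locks(
        select(DagRun).where(DagRun.state == "queued"),
        session=session,
        skip_locked=True,
    )
).all()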
Lock database rows during the context manager block. This is a convenient method for ``with_row_locks`` when we don't need the locked rows. :meta private:
def lock_rows(query: Query, session: Session) -> Generator[None, None, None]: """Lock database rows during the context manager block. This is a convenient method for ``with_row_locks`` when we don't need the locked rows. :meta private: """ locked_rows = with_row_locks(query, session) yield del locked_rows
Return a context manager that will disallow any commit that isn't done via the context manager. The aim of this is to ensure that transaction lifetime is strictly controlled, which is especially important in the core scheduler loop. Any commit on the session that is _not_ via this context manager will result in a RuntimeError. Example usage: .. code:: python with prohibit_commit(session) as guard: # ... do something with session guard.commit() # This would throw an error # session.commit()
def prohibit_commit(session):
    """
    Return a context manager that will disallow any commit that isn't done via the context manager.

    The aim of this is to ensure that transaction lifetime is strictly controlled, which is
    especially important in the core scheduler loop. Any commit on the session that is _not_ via
    this context manager will result in a RuntimeError.

    Example usage:

    .. code:: python

        with prohibit_commit(session) as guard:
            # ... do something with session
            guard.commit()

            # This would throw an error
            # session.commit()
    """
    return CommitProhibitorGuard(session)
Check if the error is about not being able to acquire a lock.
def is_lock_not_available_error(error: OperationalError):
    """Check if the error is about not being able to acquire a lock."""
    # DB specific error codes:
    # Postgres: 55P03
    # MySQL: 3572, 'Statement aborted because lock(s) could not be acquired immediately and NOWAIT
    # is set.'
    # MySQL: 1205, 'Lock wait timeout exceeded; try restarting transaction'
    # (when NOWAIT isn't available)
    db_err_code = getattr(error.orig, "pgcode", None) or error.orig.args[0]

    # We could test if error.orig is an instance of
    # psycopg2.errors.LockNotAvailable/_mysql_exceptions.OperationalError, but that involves
    # importing it; matching on the error code doesn't.
    return db_err_code in ("55P03", 1205, 3572)
Generate a tuple-in-collection operator to use in ``.where()``. For most SQL backends, this generates a simple ``([col, ...]) IN [condition]`` clause. :meta private:
def tuple_in_condition( columns: tuple[ColumnElement, ...], collection: Iterable[Any] | Select, *, session: Session | None = None, ) -> ColumnOperators: """ Generate a tuple-in-collection operator to use in ``.where()``. For most SQL backends, this generates a simple ``([col, ...]) IN [condition]`` clause. :meta private: """ return tuple_(*columns).in_(collection)
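A usage sketch mirroring how `_get_count` applies it above; the (task_id, map_index) pairs are illustrative:

condition = tuple_in_condition(
    (TaskInstance.task_id, TaskInstance.map_index),
    [("extract", -1), ("transform", 0)],  # illustrative pairs
)
tis = session.scalars(select(TaskInstance).where(condition)).all()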
Generate a tuple-not-in-collection operator to use in ``.where()``. This is similar to ``tuple_in_condition`` except generating ``NOT IN``. :meta private:
def tuple_not_in_condition( columns: tuple[ColumnElement, ...], collection: Iterable[Any] | Select, *, session: Session | None = None, ) -> ColumnOperators: """ Generate a tuple-not-in-collection operator to use in ``.where()``. This is similar to ``tuple_in_condition`` except generating ``NOT IN``. :meta private: """ return tuple_(*columns).not_in(collection)
Get the correct ORM mapper for the installed SQLAlchemy version.
def get_orm_mapper(): """Get the correct ORM mapper for the installed SQLAlchemy version.""" import sqlalchemy.orm.mapper return sqlalchemy.orm.mapper if is_sqlalchemy_v1() else sqlalchemy.orm.Mapper
Generate a random string.
def get_random_string(length=8, choices=string.ascii_letters + string.digits):
    """Generate a random string."""
    return "".join(random.choices(choices, k=length))
Convert a string to a boolean.
def to_boolean(astring: str | None) -> bool: """Convert a string to a boolean.""" if astring is None: return False if astring.lower() in TRUE_LIKE_VALUES: return True return False
Create a nested dict representation of this TaskGroup and its children used to construct the Graph.
def task_group_to_dict(task_item_or_group): """Create a nested dict representation of this TaskGroup and its children used to construct the Graph.""" from airflow.models.abstractoperator import AbstractOperator from airflow.models.mappedoperator import MappedOperator if isinstance(task := task_item_or_group, AbstractOperator): setup_teardown_type = {} is_mapped = {} if task.is_setup is True: setup_teardown_type["setupTeardownType"] = "setup" elif task.is_teardown is True: setup_teardown_type["setupTeardownType"] = "teardown" if isinstance(task, MappedOperator): is_mapped["isMapped"] = True return { "id": task.task_id, "value": { "label": task.label, "labelStyle": f"fill:{task.ui_fgcolor};", "style": f"fill:{task.ui_color};", "rx": 5, "ry": 5, **is_mapped, **setup_teardown_type, }, } task_group = task_item_or_group is_mapped = isinstance(task_group, MappedTaskGroup) children = [ task_group_to_dict(child) for child in sorted(task_group.children.values(), key=lambda t: t.label) ] if task_group.upstream_group_ids or task_group.upstream_task_ids: children.append( { "id": task_group.upstream_join_id, "value": { "label": "", "labelStyle": f"fill:{task_group.ui_fgcolor};", "style": f"fill:{task_group.ui_color};", "shape": "circle", }, } ) if task_group.downstream_group_ids or task_group.downstream_task_ids: # This is the join node used to reduce the number of edges between two TaskGroup. children.append( { "id": task_group.downstream_join_id, "value": { "label": "", "labelStyle": f"fill:{task_group.ui_fgcolor};", "style": f"fill:{task_group.ui_color};", "shape": "circle", }, } ) return { "id": task_group.group_id, "value": { "label": task_group.label, "labelStyle": f"fill:{task_group.ui_fgcolor};", "style": f"fill:{task_group.ui_color}", "rx": 5, "ry": 5, "clusterLabelPos": "top", "tooltip": task_group.tooltip, "isMapped": is_mapped, }, "children": children, }
Wrap a value to ensure it is rendered as-is without applying Jinja templating to its contents. Designed for use in an operator's template field. :param value: The value to be rendered without templating
def literal(value: Any) -> LiteralValue: """ Wrap a value to ensure it is rendered as-is without applying Jinja templating to its contents. Designed for use in an operator's template field. :param value: The value to be rendered without templating """ return LiteralValue(value)
Determine if a given datetime.datetime is aware. The concept is defined in the Python documentation. Assuming the tzinfo is either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()`` implements the appropriate logic. .. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
def is_localized(value):
    """Determine if a given datetime.datetime is aware.

    The concept is defined in the Python documentation. Assuming the tzinfo is
    either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()``
    implements the appropriate logic.

    .. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    return value.utcoffset() is not None
Determine if a given datetime.datetime is naive. The concept is defined in the Python documentation. Assuming the tzinfo is either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()`` implements the appropriate logic. .. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
def is_naive(value):
    """Determine if a given datetime.datetime is naive.

    The concept is defined in the Python documentation. Assuming the tzinfo is
    either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()``
    implements the appropriate logic.

    .. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    return value.utcoffset() is None
Get the current date and time in UTC.
def utcnow() -> dt.datetime: """Get the current date and time in UTC.""" return dt.datetime.now(tz=utc)
Return the Unix epoch (1970-01-01) as a timezone-aware datetime in UTC.
def utc_epoch() -> dt.datetime:
    """Return the Unix epoch (1970-01-01) as a timezone-aware datetime in UTC."""
    # pendulum utcnow() is not used as that sets a TimezoneInfo object
    # instead of a Timezone. This is not picklable and also creates issues
    # when using replace()
    result = dt.datetime(1970, 1, 1)
    result = result.replace(tzinfo=utc)
    return result
Create a datetime with the default timezone added if none is associated. :param value: datetime :return: datetime with tzinfo
def convert_to_utc(value: dt.datetime | None) -> DateTime | None: """Create a datetime with the default timezone added if none is associated. :param value: datetime :return: datetime with tzinfo """ if value is None: return value if not is_localized(value): from airflow.settings import TIMEZONE value = pendulum.instance(value, TIMEZONE) return pendulum.instance(value.astimezone(utc))
Make a naive datetime.datetime in a given time zone aware. :param value: datetime :param timezone: timezone :return: localized datetime in settings.TIMEZONE or timezone
def make_aware(value: dt.datetime | None, timezone: dt.tzinfo | None = None) -> dt.datetime | None: """ Make a naive datetime.datetime in a given time zone aware. :param value: datetime :param timezone: timezone :return: localized datetime in settings.TIMEZONE or timezone """ if timezone is None: from airflow.settings import TIMEZONE timezone = TIMEZONE if not value: return None # Check that we won't overwrite the timezone of an aware datetime. if is_localized(value): raise ValueError(f"make_aware expects a naive datetime, got {value}") # In case we move clock back we want to schedule the run at the time of the second # instance of the same clock time rather than the first one. # Fold parameter has no impact in other cases, so we can safely set it to 1 here value = value.replace(fold=1) localized = getattr(timezone, "localize", None) if localized is not None: # This method is available for pytz time zones return localized(value) convert = getattr(timezone, "convert", None) if convert is not None: # For pendulum return convert(value) # This may be wrong around DST changes! return value.replace(tzinfo=timezone)
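A short sketch tying the localization helpers together; the naive input value is illustrative:

import datetime as dt

naive = dt.datetime(2021, 6, 1, 12, 0)
assert is_naive(naive)

aware = make_aware(naive)          # localized to settings.TIMEZONE, with fold=1
utc_value = convert_to_utc(aware)  # converted (not re-localized) to UTC
assert is_localized(utc_value)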