# File: airflow-main/airflow/cli/__init__.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# File: airflow-main/airflow/cli/cli_config.py
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Explicit configuration and definition of Airflow CLI commands."""
from __future__ import annotations
import argparse
import json
import os
import textwrap
from argparse import ArgumentError
from typing import Callable, Iterable, NamedTuple, Union
import lazy_object_proxy
from airflow import settings
from airflow.cli.commands.legacy_commands import check_legacy_command
from airflow.configuration import conf
from airflow.executors.executor_constants import CELERY_EXECUTOR, CELERY_KUBERNETES_EXECUTOR
from airflow.executors.executor_loader import ExecutorLoader
from airflow.settings import _ENABLE_AIP_44
from airflow.utils.cli import ColorMode
from airflow.utils.module_loading import import_string
from airflow.utils.state import DagRunState, JobState
from airflow.utils.timezone import parse as parsedate
BUILD_DOCS = "BUILDING_AIRFLOW_DOCS" in os.environ
def lazy_load_command(import_path: str) -> Callable:
"""Create a lazy loader for command."""
_, _, name = import_path.rpartition(".")
def command(*args, **kwargs):
func = import_string(import_path)
return func(*args, **kwargs)
command.__name__ = name
return command
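# Illustrative sketch (assumed usage, not part of the original module): the wrapper
# returned by lazy_load_command defers the heavy import until the command is actually
# invoked, which keeps `airflow --help` fast. For example:
#
#   version_cmd = lazy_load_command("airflow.cli.commands.version_command.version")
#   version_cmd.__name__    # -> "version"; nothing has been imported yet
#   version_cmd(args)       # imports version_command here, then calls version(args)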
class DefaultHelpParser(argparse.ArgumentParser):
"""CustomParser to display help message."""
def _check_value(self, action, value):
"""Override _check_value and check conditionally added command."""
if action.dest == "subcommand" and value == "celery":
executor = conf.get("core", "EXECUTOR")
if executor not in (CELERY_EXECUTOR, CELERY_KUBERNETES_EXECUTOR):
executor_cls, _ = ExecutorLoader.import_executor_cls(executor)
classes = ()
try:
from airflow.providers.celery.executors.celery_executor import CeleryExecutor
classes += (CeleryExecutor,)
except ImportError:
message = (
"The celery subcommand requires that you pip install the celery module. "
"To do it, run: pip install 'apache-airflow[celery]'"
)
raise ArgumentError(action, message)
try:
from airflow.providers.celery.executors.celery_kubernetes_executor import (
CeleryKubernetesExecutor,
)
classes += (CeleryKubernetesExecutor,)
except ImportError:
pass
if not issubclass(executor_cls, classes):
message = (
f"celery subcommand works only with CeleryExecutor, CeleryKubernetesExecutor and "
f"executors derived from them, your current executor: {executor}, subclassed from: "
f'{", ".join([base_cls.__qualname__ for base_cls in executor_cls.__bases__])}'
)
raise ArgumentError(action, message)
if action.dest == "subcommand" and value == "kubernetes":
try:
import kubernetes.client # noqa: F401
except ImportError:
message = (
"The kubernetes subcommand requires that you pip install the kubernetes python client. "
"To do it, run: pip install 'apache-airflow[cncf.kubernetes]'"
)
raise ArgumentError(action, message)
if action.choices is not None and value not in action.choices:
check_legacy_command(action, value)
super()._check_value(action, value)
def error(self, message):
"""Override error and use print_instead of print_usage."""
self.print_help()
self.exit(2, f"\n{self.prog} command error: {message}, see help above.\n")
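# Minimal sketch of the behaviour above (assumed, not part of the original module):
# an unrecognised choices value is first routed through check_legacy_command(), which
# points pre-2.0 command names at their replacements; anything else reaches error(),
# which prints the full help and exits with status 2. Roughly:
#
#   parser = DefaultHelpParser(prog="airflow")
#   parser.add_argument("subcommand", choices=["dags", "tasks"])
#   parser.parse_args(["list_dags"])  # suggests the new-style command, then exits(2)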
# Used in Arg to enable `None' as a distinct value from "not passed"
_UNSET = object()
class Arg:
"""Class to keep information about command line argument."""
def __init__(
self,
flags=_UNSET,
help=_UNSET,
action=_UNSET,
default=_UNSET,
nargs=_UNSET,
type=_UNSET,
choices=_UNSET,
required=_UNSET,
metavar=_UNSET,
dest=_UNSET,
):
self.flags = flags
self.kwargs = {}
for k, v in locals().items():
if v is _UNSET:
continue
if k in ("self", "flags"):
continue
self.kwargs[k] = v
def add_to_parser(self, parser: argparse.ArgumentParser):
"""Add this argument to an ArgumentParser."""
if "metavar" in self.kwargs and "type" not in self.kwargs:
if self.kwargs["metavar"] == "DIRPATH":
type = lambda x: self._is_valid_directory(parser, x)
self.kwargs["type"] = type
parser.add_argument(*self.flags, **self.kwargs)
def _is_valid_directory(self, parser, arg):
if not os.path.isdir(arg):
parser.error(f"The directory '{arg}' does not exist!")
return arg
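# Minimal sketch (assumed usage, not part of the original module): an Arg only records
# its flags and kwargs; nothing touches argparse until add_to_parser() is called, and
# keyword values left at _UNSET are never forwarded. For a DIRPATH metavar the
# directory-existence check is injected as the argument's type:
#
#   parser = argparse.ArgumentParser()
#   out_arg = Arg(("-o", "--output-path"), help="Output directory", metavar="DIRPATH")
#   out_arg.add_to_parser(parser)
#   parser.parse_args(["-o", "/tmp"])   # errors out if the directory does not exist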
def positive_int(*, allow_zero):
"""Define a positive int type for an argument."""
def _check(value):
try:
value = int(value)
if allow_zero and value == 0:
return value
if value > 0:
return value
except ValueError:
pass
raise argparse.ArgumentTypeError(f"invalid positive int value: '{value}'")
return _check
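# Illustrative behaviour of the factory above (assumed standalone use):
#
#   check = positive_int(allow_zero=True)
#   check("0")     # -> 0
#   check("25")    # -> 25
#   check("-1")    # raises argparse.ArgumentTypeError
#   positive_int(allow_zero=False)("0")   # also raises ArgumentTypeError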
def string_list_type(val):
"""Parses comma-separated list and returns list of string (strips whitespace)."""
return [x.strip() for x in val.split(",")]
def string_lower_type(val):
"""Lowers arg."""
if not val:
return
return val.strip().lower()
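# Illustrative behaviour (assumed, not part of the original module):
#
#   string_list_type("dag_run, task_instance ,log")   # -> ["dag_run", "task_instance", "log"]
#   string_lower_type(" JSON ")                       # -> "json"
#   string_lower_type("")                             # -> None (falsy input passes through)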
# Shared
ARG_DAG_ID = Arg(("dag_id",), help="The id of the dag")
ARG_TASK_ID = Arg(("task_id",), help="The id of the task")
ARG_EXECUTION_DATE = Arg(("execution_date",), help="The execution date of the DAG", type=parsedate)
ARG_EXECUTION_DATE_OPTIONAL = Arg(
("execution_date",), nargs="?", help="The execution date of the DAG (optional)", type=parsedate
)
ARG_EXECUTION_DATE_OR_RUN_ID = Arg(
("execution_date_or_run_id",), help="The execution_date of the DAG or run_id of the DAGRun"
)
ARG_EXECUTION_DATE_OR_RUN_ID_OPTIONAL = Arg(
("execution_date_or_run_id",),
nargs="?",
help="The execution_date of the DAG or run_id of the DAGRun (optional)",
)
ARG_TASK_REGEX = Arg(
("-t", "--task-regex"), help="The regex to filter specific task_ids to backfill (optional)"
)
ARG_SUBDIR = Arg(
("-S", "--subdir"),
help=(
"File location or directory from which to look for the dag. "
"Defaults to '[AIRFLOW_HOME]/dags' where [AIRFLOW_HOME] is the "
"value you set for 'AIRFLOW_HOME' config you set in 'airflow.cfg' "
),
default="[AIRFLOW_HOME]/dags" if BUILD_DOCS else settings.DAGS_FOLDER,
)
ARG_START_DATE = Arg(("-s", "--start-date"), help="Override start_date YYYY-MM-DD", type=parsedate)
ARG_END_DATE = Arg(("-e", "--end-date"), help="Override end_date YYYY-MM-DD", type=parsedate)
ARG_OUTPUT_PATH = Arg(
(
"-o",
"--output-path",
),
help="The output for generated yaml files",
type=str,
default="[CWD]" if BUILD_DOCS else os.getcwd(),
)
ARG_DRY_RUN = Arg(
("-n", "--dry-run"),
help="Perform a dry run for each task. Only renders Template Fields for each task, nothing else",
action="store_true",
)
ARG_PID = Arg(("--pid",), help="PID file location", nargs="?")
ARG_DAEMON = Arg(
("-D", "--daemon"), help="Daemonize instead of running in the foreground", action="store_true"
)
ARG_STDERR = Arg(("--stderr",), help="Redirect stderr to this file")
ARG_STDOUT = Arg(("--stdout",), help="Redirect stdout to this file")
ARG_LOG_FILE = Arg(("-l", "--log-file"), help="Location of the log file")
ARG_YES = Arg(
("-y", "--yes"),
help="Do not prompt to confirm. Use with care!",
action="store_true",
default=False,
)
ARG_OUTPUT = Arg(
(
"-o",
"--output",
),
help="Output format. Allowed values: json, yaml, plain, table (default: table)",
metavar="(table, json, yaml, plain)",
choices=("table", "json", "yaml", "plain"),
default="table",
)
ARG_COLOR = Arg(
("--color",),
help="Do emit colored output (default: auto)",
choices={ColorMode.ON, ColorMode.OFF, ColorMode.AUTO},
default=ColorMode.AUTO,
)
# DB args
ARG_VERSION_RANGE = Arg(
("-r", "--range"),
help="Version range(start:end) for offline sql generation. Example: '2.0.2:2.2.3'",
default=None,
)
ARG_REVISION_RANGE = Arg(
("--revision-range",),
help=(
"Migration revision range(start:end) to use for offline sql generation. "
"Example: ``a13f7613ad25:7b2661a43ba3``"
),
default=None,
)
# list_dag_runs
ARG_DAG_ID_REQ_FLAG = Arg(
("-d", "--dag-id"), required=True, help="The id of the dag"
) # TODO: convert this to a positional arg in Airflow 3
ARG_NO_BACKFILL = Arg(
("--no-backfill",), help="filter all the backfill dagruns given the dag id", action="store_true"
)
dagrun_states = tuple(state.value for state in DagRunState)
ARG_DR_STATE = Arg(
("--state",),
help="Only list the DAG runs corresponding to the state",
metavar=", ".join(dagrun_states),
choices=dagrun_states,
)
# list_jobs
ARG_DAG_ID_OPT = Arg(("-d", "--dag-id"), help="The id of the dag")
ARG_LIMIT = Arg(("--limit",), help="Return a limited number of records")
job_states = tuple(state.value for state in JobState)
ARG_JOB_STATE = Arg(
("--state",),
help="Only list the jobs corresponding to the state",
metavar=", ".join(job_states),
choices=job_states,
)
# next_execution
ARG_NUM_EXECUTIONS = Arg(
("-n", "--num-executions"),
default=1,
type=positive_int(allow_zero=False),
help="The number of next execution datetimes to show",
)
# backfill
ARG_MARK_SUCCESS = Arg(
("-m", "--mark-success"), help="Mark jobs as succeeded without running them", action="store_true"
)
ARG_VERBOSE = Arg(("-v", "--verbose"), help="Make logging output more verbose", action="store_true")
ARG_LOCAL = Arg(("-l", "--local"), help="Run the task using the LocalExecutor", action="store_true")
ARG_DONOT_PICKLE = Arg(
("-x", "--donot-pickle"),
help=(
"Do not attempt to pickle the DAG object to send over "
"to the workers, just tell the workers to run their version "
"of the code"
),
action="store_true",
)
ARG_BF_IGNORE_DEPENDENCIES = Arg(
("-i", "--ignore-dependencies"),
help=(
"Skip upstream tasks, run only the tasks "
"matching the regexp. Only works in conjunction "
"with task_regex"
),
action="store_true",
)
ARG_BF_IGNORE_FIRST_DEPENDS_ON_PAST = Arg(
("-I", "--ignore-first-depends-on-past"),
help=(
"Ignores depends_on_past dependencies for the first "
"set of tasks only (subsequent executions in the backfill "
"DO respect depends_on_past)"
),
action="store_true",
)
ARG_POOL = Arg(("--pool",), "Resource pool to use")
ARG_DELAY_ON_LIMIT = Arg(
("--delay-on-limit",),
help=(
"Amount of time in seconds to wait when the limit "
"on maximum active dag runs (max_active_runs) has "
"been reached before trying to execute a dag run "
"again"
),
type=float,
default=1.0,
)
ARG_RESET_DAG_RUN = Arg(
("--reset-dagruns",),
help=(
"if set, the backfill will delete existing "
"backfill-related DAG runs and start "
"anew with fresh, running DAG runs"
),
action="store_true",
)
ARG_RERUN_FAILED_TASKS = Arg(
("--rerun-failed-tasks",),
help=(
"if set, the backfill will auto-rerun "
"all the failed tasks for the backfill date range "
"instead of throwing exceptions"
),
action="store_true",
)
ARG_CONTINUE_ON_FAILURES = Arg(
("--continue-on-failures",),
help=("if set, the backfill will keep going even if some of the tasks failed"),
action="store_true",
)
ARG_DISABLE_RETRY = Arg(
("--disable-retry",),
help=("if set, the backfill will set tasks as failed without retrying."),
action="store_true",
)
ARG_RUN_BACKWARDS = Arg(
(
"-B",
"--run-backwards",
),
help=(
"if set, the backfill will run tasks from the most "
"recent day first. if there are tasks that depend_on_past "
"this option will throw an exception"
),
action="store_true",
)
ARG_TREAT_DAG_AS_REGEX = Arg(
("--treat-dag-as-regex",),
help=("if set, dag_id will be treated as regex instead of an exact string"),
action="store_true",
)
# test_dag
ARG_SHOW_DAGRUN = Arg(
("--show-dagrun",),
help=(
"After completing the backfill, shows the diagram for current DAG Run.\n"
"\n"
"The diagram is in DOT language\n"
),
action="store_true",
)
ARG_IMGCAT_DAGRUN = Arg(
("--imgcat-dagrun",),
help=(
"After completing the dag run, prints a diagram on the screen for the "
"current DAG Run using the imgcat tool.\n"
),
action="store_true",
)
ARG_SAVE_DAGRUN = Arg(
("--save-dagrun",),
help="After completing the backfill, saves the diagram for current DAG Run to the indicated file.\n\n",
)
# list_tasks
ARG_TREE = Arg(("-t", "--tree"), help="Tree view", action="store_true")
# tasks_run
# This is a hidden option -- not meant for users to set or know about
ARG_SHUT_DOWN_LOGGING = Arg(
("--no-shut-down-logging",),
help=argparse.SUPPRESS,
dest="shut_down_logging",
action="store_false",
default=True,
)
# clear
ARG_UPSTREAM = Arg(("-u", "--upstream"), help="Include upstream tasks", action="store_true")
ARG_ONLY_FAILED = Arg(("-f", "--only-failed"), help="Only failed jobs", action="store_true")
ARG_ONLY_RUNNING = Arg(("-r", "--only-running"), help="Only running jobs", action="store_true")
ARG_DOWNSTREAM = Arg(("-d", "--downstream"), help="Include downstream tasks", action="store_true")
ARG_EXCLUDE_SUBDAGS = Arg(("-x", "--exclude-subdags"), help="Exclude subdags", action="store_true")
ARG_EXCLUDE_PARENTDAG = Arg(
("-X", "--exclude-parentdag"),
help="Exclude ParentDAGS if the task cleared is a part of a SubDAG",
action="store_true",
)
ARG_DAG_REGEX = Arg(
("-R", "--dag-regex"), help="Search dag_id as regex instead of exact string", action="store_true"
)
# show_dag
ARG_SAVE = Arg(("-s", "--save"), help="Saves the result to the indicated file.")
ARG_IMGCAT = Arg(("--imgcat",), help="Displays graph using the imgcat tool.", action="store_true")
# trigger_dag
ARG_RUN_ID = Arg(("-r", "--run-id"), help="Helps to identify this run")
ARG_CONF = Arg(("-c", "--conf"), help="JSON string that gets pickled into the DagRun's conf attribute")
ARG_EXEC_DATE = Arg(("-e", "--exec-date"), help="The execution date of the DAG", type=parsedate)
ARG_REPLACE_MICRO = Arg(
("--no-replace-microseconds",),
help="whether microseconds should be zeroed",
dest="replace_microseconds",
action="store_false",
default=True,
)
# db
ARG_DB_TABLES = Arg(
("-t", "--tables"),
help=lazy_object_proxy.Proxy(
lambda: f"Table names to perform maintenance on (use comma-separated list).\n"
f"Options: {import_string('airflow.cli.commands.db_command.all_tables')}"
),
type=string_list_type,
)
ARG_DB_CLEANUP_TIMESTAMP = Arg(
("--clean-before-timestamp",),
help="The date or timestamp before which data should be purged.\n"
"If no timezone info is supplied then dates are assumed to be in airflow default timezone.\n"
"Example: '2022-01-01 00:00:00+01:00'",
type=parsedate,
required=True,
)
ARG_DB_DRY_RUN = Arg(
("--dry-run",),
help="Perform a dry run",
action="store_true",
)
ARG_DB_SKIP_ARCHIVE = Arg(
("--skip-archive",),
help="Don't preserve purged records in an archive table.",
action="store_true",
)
ARG_DB_EXPORT_FORMAT = Arg(
("--export-format",),
help="The file format to export the cleaned data",
choices=("csv",),
default="csv",
)
ARG_DB_OUTPUT_PATH = Arg(
("--output-path",),
metavar="DIRPATH",
help="The path to the output directory to export the cleaned data. This directory must exist.",
required=True,
)
ARG_DB_DROP_ARCHIVES = Arg(
("--drop-archives",),
help="Drop the archive tables after exporting. Use with caution.",
action="store_true",
)
ARG_DB_RETRY = Arg(
("--retry",),
default=0,
type=positive_int(allow_zero=True),
help="Retry database check upon failure",
)
ARG_DB_RETRY_DELAY = Arg(
("--retry-delay",),
default=1,
type=positive_int(allow_zero=False),
help="Wait time between retries in seconds",
)
# pool
ARG_POOL_NAME = Arg(("pool",), metavar="NAME", help="Pool name")
ARG_POOL_SLOTS = Arg(("slots",), type=int, help="Pool slots")
ARG_POOL_DESCRIPTION = Arg(("description",), help="Pool description")
ARG_POOL_IMPORT = Arg(
("file",),
metavar="FILEPATH",
help="Import pools from JSON file. Example format::\n"
+ textwrap.indent(
textwrap.dedent(
"""
{
"pool_1": {"slots": 5, "description": ""},
"pool_2": {"slots": 10, "description": "test"}
}"""
),
" " * 4,
),
)
ARG_POOL_EXPORT = Arg(("file",), metavar="FILEPATH", help="Export all pools to JSON file")
# variables
ARG_VAR = Arg(("key",), help="Variable key")
ARG_VAR_VALUE = Arg(("value",), metavar="VALUE", help="Variable value")
ARG_DEFAULT = Arg(
("-d", "--default"), metavar="VAL", default=None, help="Default value returned if variable does not exist"
)
ARG_DESERIALIZE_JSON = Arg(("-j", "--json"), help="Deserialize JSON variable", action="store_true")
ARG_SERIALIZE_JSON = Arg(("-j", "--json"), help="Serialize JSON variable", action="store_true")
ARG_VAR_IMPORT = Arg(("file",), help="Import variables from JSON file")
ARG_VAR_EXPORT = Arg(("file",), help="Export all variables to JSON file")
# kerberos
ARG_PRINCIPAL = Arg(("principal",), help="kerberos principal", nargs="?")
ARG_KEYTAB = Arg(("-k", "--keytab"), help="keytab", nargs="?", default=conf.get("kerberos", "keytab"))
# run
ARG_INTERACTIVE = Arg(
("-N", "--interactive"),
help="Do not capture standard output and error streams (useful for interactive debugging)",
action="store_true",
)
# TODO(aoen): "force" is a poor choice of name here since it implies it overrides
# all dependencies (not just past success), e.g. the ignore_depends_on_past
# dependency. This flag should be deprecated and renamed to 'ignore_ti_state' and
# the "ignore_all_dependencies" command should be called the"force" command
# instead.
ARG_FORCE = Arg(
("-f", "--force"),
help="Ignore previous task instance state, rerun regardless if task already succeeded/failed",
action="store_true",
)
ARG_RAW = Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true")
ARG_IGNORE_ALL_DEPENDENCIES = Arg(
("-A", "--ignore-all-dependencies"),
help="Ignores all non-critical dependencies, including ignore_ti_state and ignore_task_deps",
action="store_true",
)
# TODO(aoen): ignore_dependencies is a poor choice of name here because it is too
# vague (e.g. a task being in the appropriate state to be run is also a dependency
# but is not ignored by this flag), the name 'ignore_task_dependencies' is
# slightly better (as it ignores all dependencies that are specific to the task),
# so deprecate the old command name and use this instead.
ARG_IGNORE_DEPENDENCIES = Arg(
("-i", "--ignore-dependencies"),
help="Ignore task-specific dependencies, e.g. upstream, depends_on_past, and retry delay dependencies",
action="store_true",
)
ARG_IGNORE_DEPENDS_ON_PAST = Arg(
("-I", "--ignore-depends-on-past"),
help="Deprecated -- use `--depends-on-past ignore` instead. "
"Ignore depends_on_past dependencies (but respect upstream dependencies)",
action="store_true",
)
ARG_DEPENDS_ON_PAST = Arg(
("-d", "--depends-on-past"),
help="Determine how Airflow should deal with past dependencies. The default action is `check`, Airflow "
"will check if the the past dependencies are met for the tasks having `depends_on_past=True` before run "
"them, if `ignore` is provided, the past dependencies will be ignored, if `wait` is provided and "
"`depends_on_past=True`, Airflow will wait the past dependencies until they are met before running or "
"skipping the task",
choices={"check", "ignore", "wait"},
default="check",
)
ARG_SHIP_DAG = Arg(
("--ship-dag",), help="Pickles (serializes) the DAG and ships it to the worker", action="store_true"
)
ARG_PICKLE = Arg(("-p", "--pickle"), help="Serialized pickle object of the entire dag (used internally)")
ARG_JOB_ID = Arg(("-j", "--job-id"), help=argparse.SUPPRESS)
ARG_CFG_PATH = Arg(("--cfg-path",), help="Path to config file to use instead of airflow.cfg")
ARG_MAP_INDEX = Arg(("--map-index",), type=int, default=-1, help="Mapped task index")
ARG_READ_FROM_DB = Arg(("--read-from-db",), help="Read dag from DB instead of dag file", action="store_true")
# database
ARG_MIGRATION_TIMEOUT = Arg(
("-t", "--migration-wait-timeout"),
help="timeout to wait for db to migrate ",
type=int,
default=60,
)
ARG_DB_RESERIALIZE_DAGS = Arg(
("--no-reserialize-dags",),
# Not intended for user, so dont show in help
help=argparse.SUPPRESS,
action="store_false",
default=True,
dest="reserialize_dags",
)
ARG_DB_VERSION__UPGRADE = Arg(
("-n", "--to-version"),
help=(
"(Optional) The airflow version to upgrade to. Note: must provide either "
"`--to-revision` or `--to-version`."
),
)
ARG_DB_REVISION__UPGRADE = Arg(
("-r", "--to-revision"),
help="(Optional) If provided, only run migrations up to and including this Alembic revision.",
)
ARG_DB_VERSION__DOWNGRADE = Arg(
("-n", "--to-version"),
help="(Optional) If provided, only run migrations up to this version.",
)
ARG_DB_FROM_VERSION = Arg(
("--from-version",),
help="(Optional) If generating sql, may supply a *from* version",
)
ARG_DB_REVISION__DOWNGRADE = Arg(
("-r", "--to-revision"),
help="The Alembic revision to downgrade to. Note: must provide either `--to-revision` or `--to-version`.",
)
ARG_DB_FROM_REVISION = Arg(
("--from-revision",),
help="(Optional) If generating sql, may supply a *from* Alembic revision",
)
ARG_DB_SQL_ONLY = Arg(
("-s", "--show-sql-only"),
help="Don't actually run migrations; just print out sql scripts for offline migration. "
"Required if using either `--from-revision` or `--from-version`.",
action="store_true",
default=False,
)
ARG_DB_SKIP_INIT = Arg(
("-s", "--skip-init"),
help="Only remove tables; do not perform db init.",
action="store_true",
default=False,
)
# webserver
ARG_PORT = Arg(
("-p", "--port"),
default=conf.get("webserver", "WEB_SERVER_PORT"),
type=int,
help="The port on which to run the server",
)
ARG_SSL_CERT = Arg(
("--ssl-cert",),
default=conf.get("webserver", "WEB_SERVER_SSL_CERT"),
help="Path to the SSL certificate for the webserver",
)
ARG_SSL_KEY = Arg(
("--ssl-key",),
default=conf.get("webserver", "WEB_SERVER_SSL_KEY"),
help="Path to the key to use with the SSL certificate",
)
ARG_WORKERS = Arg(
("-w", "--workers"),
default=conf.get("webserver", "WORKERS"),
type=int,
help="Number of workers to run the webserver on",
)
ARG_WORKERCLASS = Arg(
("-k", "--workerclass"),
default=conf.get("webserver", "WORKER_CLASS"),
choices=["sync", "eventlet", "gevent", "tornado"],
help="The worker class to use for Gunicorn",
)
ARG_WORKER_TIMEOUT = Arg(
("-t", "--worker-timeout"),
default=conf.get("webserver", "WEB_SERVER_WORKER_TIMEOUT"),
type=int,
help="The timeout for waiting on webserver workers",
)
ARG_HOSTNAME = Arg(
("-H", "--hostname"),
default=conf.get("webserver", "WEB_SERVER_HOST"),
help="Set the hostname on which to run the web server",
)
ARG_DEBUG = Arg(
("-d", "--debug"), help="Use the server that ships with Flask in debug mode", action="store_true"
)
ARG_ACCESS_LOGFILE = Arg(
("-A", "--access-logfile"),
default=conf.get("webserver", "ACCESS_LOGFILE"),
help="The logfile to store the webserver access log. Use '-' to print to stdout",
)
ARG_ERROR_LOGFILE = Arg(
("-E", "--error-logfile"),
default=conf.get("webserver", "ERROR_LOGFILE"),
help="The logfile to store the webserver error log. Use '-' to print to stderr",
)
ARG_ACCESS_LOGFORMAT = Arg(
("-L", "--access-logformat"),
default=conf.get("webserver", "ACCESS_LOGFORMAT"),
help="The access log format for gunicorn logs",
)
# internal-api
ARG_INTERNAL_API_PORT = Arg(
("-p", "--port"),
default=9080,
type=int,
help="The port on which to run the server",
)
ARG_INTERNAL_API_WORKERS = Arg(
("-w", "--workers"),
default=4,
type=int,
help="Number of workers to run the Internal API-on",
)
ARG_INTERNAL_API_WORKERCLASS = Arg(
("-k", "--workerclass"),
default="sync",
choices=["sync", "eventlet", "gevent", "tornado"],
help="The worker class to use for Gunicorn",
)
ARG_INTERNAL_API_WORKER_TIMEOUT = Arg(
("-t", "--worker-timeout"),
default=120,
type=int,
help="The timeout for waiting on Internal API workers",
)
ARG_INTERNAL_API_HOSTNAME = Arg(
("-H", "--hostname"),
default="0.0.0.0",
help="Set the hostname on which to run the web server",
)
ARG_INTERNAL_API_ACCESS_LOGFILE = Arg(
("-A", "--access-logfile"),
help="The logfile to store the access log. Use '-' to print to stdout",
)
ARG_INTERNAL_API_ERROR_LOGFILE = Arg(
("-E", "--error-logfile"),
help="The logfile to store the error log. Use '-' to print to stderr",
)
ARG_INTERNAL_API_ACCESS_LOGFORMAT = Arg(
("-L", "--access-logformat"),
help="The access log format for gunicorn logs",
)
# scheduler
ARG_NUM_RUNS = Arg(
("-n", "--num-runs"),
default=conf.getint("scheduler", "num_runs"),
type=int,
help="Set the number of runs to execute before exiting",
)
ARG_DO_PICKLE = Arg(
("-p", "--do-pickle"),
default=False,
help=(
"Attempt to pickle the DAG object to send over "
"to the workers, instead of letting workers run their version "
"of the code"
),
action="store_true",
)
# worker
ARG_QUEUES = Arg(
("-q", "--queues"),
help="Comma delimited list of queues to serve",
default=conf.get("operators", "DEFAULT_QUEUE"),
)
ARG_CONCURRENCY = Arg(
("-c", "--concurrency"),
type=int,
help="The number of worker processes",
default=conf.get("celery", "worker_concurrency"),
)
ARG_CELERY_HOSTNAME = Arg(
("-H", "--celery-hostname"),
help="Set the hostname of celery worker if you have multiple workers on a single machine",
)
ARG_UMASK = Arg(
("-u", "--umask"),
help="Set the umask of celery worker in daemon mode",
)
ARG_WITHOUT_MINGLE = Arg(
("--without-mingle",),
default=False,
help="Don't synchronize with other workers at start-up",
action="store_true",
)
ARG_WITHOUT_GOSSIP = Arg(
("--without-gossip",),
default=False,
help="Don't subscribe to other workers events",
action="store_true",
)
# flower
ARG_BROKER_API = Arg(("-a", "--broker-api"), help="Broker API")
ARG_FLOWER_HOSTNAME = Arg(
("-H", "--hostname"),
default=conf.get("celery", "FLOWER_HOST"),
help="Set the hostname on which to run the server",
)
ARG_FLOWER_PORT = Arg(
("-p", "--port"),
default=conf.get("celery", "FLOWER_PORT"),
type=int,
help="The port on which to run the server",
)
ARG_FLOWER_CONF = Arg(("-c", "--flower-conf"), help="Configuration file for flower")
ARG_FLOWER_URL_PREFIX = Arg(
("-u", "--url-prefix"), default=conf.get("celery", "FLOWER_URL_PREFIX"), help="URL prefix for Flower"
)
ARG_FLOWER_BASIC_AUTH = Arg(
("-A", "--basic-auth"),
default=conf.get("celery", "FLOWER_BASIC_AUTH"),
help=(
"Securing Flower with Basic Authentication. "
"Accepts user:password pairs separated by a comma. "
"Example: flower_basic_auth = user1:password1,user2:password2"
),
)
ARG_TASK_PARAMS = Arg(("-t", "--task-params"), help="Sends a JSON params dict to the task")
ARG_POST_MORTEM = Arg(
("-m", "--post-mortem"), action="store_true", help="Open debugger on uncaught exception"
)
ARG_ENV_VARS = Arg(
("--env-vars",),
help="Set env var in both parsing time and runtime for each of entry supplied in a JSON dict",
type=json.loads,
)
# connections
ARG_CONN_ID = Arg(("conn_id",), help="Connection id, required to get/add/delete/test a connection", type=str)
ARG_CONN_ID_FILTER = Arg(
("--conn-id",), help="If passed, only items with the specified connection ID will be displayed", type=str
)
ARG_CONN_URI = Arg(
("--conn-uri",), help="Connection URI, required to add a connection without conn_type", type=str
)
ARG_CONN_JSON = Arg(
("--conn-json",), help="Connection JSON, required to add a connection using JSON representation", type=str
)
ARG_CONN_TYPE = Arg(
("--conn-type",), help="Connection type, required to add a connection without conn_uri", type=str
)
ARG_CONN_DESCRIPTION = Arg(
("--conn-description",), help="Connection description, optional when adding a connection", type=str
)
ARG_CONN_HOST = Arg(("--conn-host",), help="Connection host, optional when adding a connection", type=str)
ARG_CONN_LOGIN = Arg(("--conn-login",), help="Connection login, optional when adding a connection", type=str)
ARG_CONN_PASSWORD = Arg(
("--conn-password",), help="Connection password, optional when adding a connection", type=str
)
ARG_CONN_SCHEMA = Arg(
("--conn-schema",), help="Connection schema, optional when adding a connection", type=str
)
ARG_CONN_PORT = Arg(("--conn-port",), help="Connection port, optional when adding a connection", type=str)
ARG_CONN_EXTRA = Arg(
("--conn-extra",), help="Connection `Extra` field, optional when adding a connection", type=str
)
ARG_CONN_EXPORT = Arg(
("file",),
help="Output file path for exporting the connections",
type=argparse.FileType("w", encoding="UTF-8"),
)
ARG_CONN_EXPORT_FORMAT = Arg(
("--format",),
help="Deprecated -- use `--file-format` instead. File format to use for the export.",
type=str,
choices=["json", "yaml", "env"],
)
ARG_CONN_EXPORT_FILE_FORMAT = Arg(
("--file-format",), help="File format for the export", type=str, choices=["json", "yaml", "env"]
)
ARG_CONN_SERIALIZATION_FORMAT = Arg(
("--serialization-format",),
help="When exporting as `.env` format, defines how connections should be serialized. Default is `uri`.",
type=string_lower_type,
choices=["json", "uri"],
)
ARG_CONN_IMPORT = Arg(("file",), help="Import connections from a file")
ARG_CONN_OVERWRITE = Arg(
("--overwrite",),
help="Overwrite existing entries if a conflict occurs",
required=False,
action="store_true",
)
# providers
ARG_PROVIDER_NAME = Arg(
("provider_name",), help="Provider name, required to get provider information", type=str
)
ARG_FULL = Arg(
("-f", "--full"),
help="Full information about the provider, including documentation information.",
required=False,
action="store_true",
)
# users
ARG_USERNAME = Arg(("-u", "--username"), help="Username of the user", required=True, type=str)
ARG_USERNAME_OPTIONAL = Arg(("-u", "--username"), help="Username of the user", type=str)
ARG_FIRSTNAME = Arg(("-f", "--firstname"), help="First name of the user", required=True, type=str)
ARG_LASTNAME = Arg(("-l", "--lastname"), help="Last name of the user", required=True, type=str)
ARG_ROLE = Arg(
("-r", "--role"),
help="Role of the user. Existing roles include Admin, User, Op, Viewer, and Public",
required=True,
type=str,
)
ARG_EMAIL = Arg(("-e", "--email"), help="Email of the user", required=True, type=str)
ARG_EMAIL_OPTIONAL = Arg(("-e", "--email"), help="Email of the user", type=str)
ARG_PASSWORD = Arg(
("-p", "--password"),
help="Password of the user, required to create a user without --use-random-password",
type=str,
)
ARG_USE_RANDOM_PASSWORD = Arg(
("--use-random-password",),
help="Do not prompt for password. Use random string instead."
" Required to create a user without --password ",
default=False,
action="store_true",
)
ARG_USER_IMPORT = Arg(
("import",),
metavar="FILEPATH",
help="Import users from JSON file. Example format::\n"
+ textwrap.indent(
textwrap.dedent(
"""
[
{
"email": "[email protected]",
"firstname": "Jon",
"lastname": "Doe",
"roles": ["Public"],
"username": "jondoe"
}
]"""
),
" " * 4,
),
)
ARG_USER_EXPORT = Arg(("export",), metavar="FILEPATH", help="Export all users to JSON file")
# roles
ARG_CREATE_ROLE = Arg(("-c", "--create"), help="Create a new role", action="store_true")
ARG_LIST_ROLES = Arg(("-l", "--list"), help="List roles", action="store_true")
ARG_ROLES = Arg(("role",), help="The name of a role", nargs="*")
ARG_PERMISSIONS = Arg(("-p", "--permission"), help="Show role permissions", action="store_true")
ARG_ROLE_RESOURCE = Arg(("-r", "--resource"), help="The name of permissions", nargs="*", required=True)
ARG_ROLE_ACTION = Arg(("-a", "--action"), help="The action of permissions", nargs="*")
ARG_ROLE_ACTION_REQUIRED = Arg(("-a", "--action"), help="The action of permissions", nargs="*", required=True)
ARG_AUTOSCALE = Arg(("-a", "--autoscale"), help="Minimum and maximum number of workers to autoscale")
ARG_SKIP_SERVE_LOGS = Arg(
("-s", "--skip-serve-logs"),
default=False,
help="Don't start the serve logs process along with the workers",
action="store_true",
)
ARG_ROLE_IMPORT = Arg(("file",), help="Import roles from JSON file", nargs=None)
ARG_ROLE_EXPORT = Arg(("file",), help="Export all roles to JSON file", nargs=None)
ARG_ROLE_EXPORT_FMT = Arg(
("-p", "--pretty"),
help="Format output JSON file by sorting role names and indenting by 4 spaces",
action="store_true",
)
# info
ARG_ANONYMIZE = Arg(
("--anonymize",),
help="Minimize any personal identifiable information. Use it when sharing output with others.",
action="store_true",
)
ARG_FILE_IO = Arg(
("--file-io",), help="Send output to file.io service and returns link.", action="store_true"
)
# config
ARG_SECTION = Arg(
("section",),
help="The section name",
)
ARG_OPTION = Arg(
("option",),
help="The option name",
)
ARG_OPTIONAL_SECTION = Arg(
("--section",),
help="The section name",
)
# kubernetes cleanup-pods
ARG_NAMESPACE = Arg(
("--namespace",),
default=conf.get("kubernetes_executor", "namespace"),
help="Kubernetes Namespace. Default value is `[kubernetes] namespace` in configuration.",
)
ARG_MIN_PENDING_MINUTES = Arg(
("--min-pending-minutes",),
default=30,
type=positive_int(allow_zero=False),
help=(
"Pending pods created before the time interval are to be cleaned up, "
"measured in minutes. Default value is 30(m). The minimum value is 5(m)."
),
)
# jobs check
ARG_JOB_TYPE_FILTER = Arg(
("--job-type",),
choices=("BackfillJob", "LocalTaskJob", "SchedulerJob", "TriggererJob", "DagProcessorJob"),
action="store",
help="The type of job(s) that will be checked.",
)
ARG_JOB_HOSTNAME_FILTER = Arg(
("--hostname",),
default=None,
type=str,
help="The hostname of job(s) that will be checked.",
)
ARG_JOB_HOSTNAME_CALLABLE_FILTER = Arg(
("--local",),
action="store_true",
help="If passed, this command will only show jobs from the local host "
"(those with a hostname matching what `hostname_callable` returns).",
)
ARG_JOB_LIMIT = Arg(
("--limit",),
default=1,
type=positive_int(allow_zero=True),
help="The number of recent jobs that will be checked. To disable limit, set 0. ",
)
ARG_ALLOW_MULTIPLE = Arg(
("--allow-multiple",),
action="store_true",
help="If passed, this command will be successful even if multiple matching alive jobs are found.",
)
# sync-perm
ARG_INCLUDE_DAGS = Arg(
("--include-dags",), help="If passed, DAG specific permissions will also be synced.", action="store_true"
)
# triggerer
ARG_CAPACITY = Arg(
("--capacity",),
type=positive_int(allow_zero=False),
help="The maximum number of triggers that a Triggerer will run at one time.",
)
# reserialize
ARG_CLEAR_ONLY = Arg(
("--clear-only",),
action="store_true",
help="If passed, serialized DAGs will be cleared but not reserialized.",
)
ALTERNATIVE_CONN_SPECS_ARGS = [
ARG_CONN_TYPE,
ARG_CONN_DESCRIPTION,
ARG_CONN_HOST,
ARG_CONN_LOGIN,
ARG_CONN_PASSWORD,
ARG_CONN_SCHEMA,
ARG_CONN_PORT,
]
class ActionCommand(NamedTuple):
"""Single CLI command."""
name: str
help: str
func: Callable
args: Iterable[Arg]
description: str | None = None
epilog: str | None = None
class GroupCommand(NamedTuple):
"""ClI command with subcommands."""
name: str
help: str
subcommands: Iterable
description: str | None = None
epilog: str | None = None
CLICommand = Union[ActionCommand, GroupCommand]
DAGS_COMMANDS = (
ActionCommand(
name="details",
help="Get DAG details given a DAG id",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_details"),
args=(ARG_DAG_ID, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="list",
help="List all the DAGs",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_list_dags"),
args=(ARG_SUBDIR, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="list-import-errors",
help="List all the DAGs that have import errors",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_list_import_errors"),
args=(ARG_SUBDIR, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="report",
help="Show DagBag loading report",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_report"),
args=(ARG_SUBDIR, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="list-runs",
help="List DAG runs given a DAG id",
description=(
"List DAG runs given a DAG id. If state option is given, it will only search for all the "
"dagruns with the given state. If no_backfill option is given, it will filter out all "
"backfill dagruns for given dag id. If start_date is given, it will filter out all the "
"dagruns that were executed before this date. If end_date is given, it will filter out "
"all the dagruns that were executed after this date. "
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_list_dag_runs"),
args=(
ARG_DAG_ID_REQ_FLAG,
ARG_NO_BACKFILL,
ARG_DR_STATE,
ARG_OUTPUT,
ARG_VERBOSE,
ARG_START_DATE,
ARG_END_DATE,
),
),
ActionCommand(
name="list-jobs",
help="List the jobs",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_list_jobs"),
args=(ARG_DAG_ID_OPT, ARG_JOB_STATE, ARG_LIMIT, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="state",
help="Get the status of a dag run",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_state"),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_VERBOSE),
),
ActionCommand(
name="next-execution",
help="Get the next execution datetimes of a DAG",
description=(
"Get the next execution datetimes of a DAG. It returns one execution unless the "
"num-executions option is given"
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_next_execution"),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_NUM_EXECUTIONS, ARG_VERBOSE),
),
ActionCommand(
name="pause",
help="Pause a DAG",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_pause"),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_VERBOSE),
),
ActionCommand(
name="unpause",
help="Resume a paused DAG",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_unpause"),
args=(ARG_DAG_ID, ARG_SUBDIR, ARG_VERBOSE),
),
ActionCommand(
name="trigger",
help="Trigger a DAG run",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_trigger"),
args=(
ARG_DAG_ID,
ARG_SUBDIR,
ARG_RUN_ID,
ARG_CONF,
ARG_EXEC_DATE,
ARG_VERBOSE,
ARG_REPLACE_MICRO,
ARG_OUTPUT,
),
),
ActionCommand(
name="delete",
help="Delete all DB records related to the specified DAG",
func=lazy_load_command("airflow.cli.commands.dag_command.dag_delete"),
args=(ARG_DAG_ID, ARG_YES, ARG_VERBOSE),
),
ActionCommand(
name="show",
help="Displays DAG's tasks with their dependencies",
description=(
"The --imgcat option only works in iTerm.\n"
"\n"
"For more information, see: https://www.iterm2.com/documentation-images.html\n"
"\n"
"The --save option saves the result to the indicated file.\n"
"\n"
"The file format is determined by the file extension. "
"For more information about supported "
"format, see: https://www.graphviz.org/doc/info/output.html\n"
"\n"
"If you want to create a PNG file then you should execute the following command:\n"
"airflow dags show <DAG_ID> --save output.png\n"
"\n"
"If you want to create a DOT file then you should execute the following command:\n"
"airflow dags show <DAG_ID> --save output.dot\n"
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_show"),
args=(
ARG_DAG_ID,
ARG_SUBDIR,
ARG_SAVE,
ARG_IMGCAT,
ARG_VERBOSE,
),
),
ActionCommand(
name="show-dependencies",
help="Displays DAGs with their dependencies",
description=(
"The --imgcat option only works in iTerm.\n"
"\n"
"For more information, see: https://www.iterm2.com/documentation-images.html\n"
"\n"
"The --save option saves the result to the indicated file.\n"
"\n"
"The file format is determined by the file extension. "
"For more information about supported "
"format, see: https://www.graphviz.org/doc/info/output.html\n"
"\n"
"If you want to create a PNG file then you should execute the following command:\n"
"airflow dags show-dependencies --save output.png\n"
"\n"
"If you want to create a DOT file then you should execute the following command:\n"
"airflow dags show-dependencies --save output.dot\n"
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_dependencies_show"),
args=(
ARG_SUBDIR,
ARG_SAVE,
ARG_IMGCAT,
ARG_VERBOSE,
),
),
ActionCommand(
name="backfill",
help="Run subsections of a DAG for a specified date range",
description=(
"Run subsections of a DAG for a specified date range. If reset_dag_run option is used, "
"backfill will first prompt users whether airflow should clear all the previous dag_run and "
"task_instances within the backfill date range. If rerun_failed_tasks is used, backfill "
"will auto re-run the previous failed task instances within the backfill date range"
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_backfill"),
args=(
ARG_DAG_ID,
ARG_TASK_REGEX,
ARG_START_DATE,
ARG_END_DATE,
ARG_MARK_SUCCESS,
ARG_LOCAL,
ARG_DONOT_PICKLE,
ARG_YES,
ARG_CONTINUE_ON_FAILURES,
ARG_DISABLE_RETRY,
ARG_BF_IGNORE_DEPENDENCIES,
ARG_BF_IGNORE_FIRST_DEPENDS_ON_PAST,
ARG_SUBDIR,
ARG_POOL,
ARG_DELAY_ON_LIMIT,
ARG_DRY_RUN,
ARG_VERBOSE,
ARG_CONF,
ARG_RESET_DAG_RUN,
ARG_RERUN_FAILED_TASKS,
ARG_RUN_BACKWARDS,
ARG_TREAT_DAG_AS_REGEX,
),
),
ActionCommand(
name="test",
help="Execute one single DagRun",
description=(
"Execute one single DagRun for a given DAG and execution date.\n"
"\n"
"The --imgcat-dagrun option only works in iTerm.\n"
"\n"
"For more information, see: https://www.iterm2.com/documentation-images.html\n"
"\n"
"If --save-dagrun is used, then, after completing the backfill, saves the diagram "
"for current DAG Run to the indicated file.\n"
"The file format is determined by the file extension. "
"For more information about supported format, "
"see: https://www.graphviz.org/doc/info/output.html\n"
"\n"
"If you want to create a PNG file then you should execute the following command:\n"
"airflow dags test <DAG_ID> <EXECUTION_DATE> --save-dagrun output.png\n"
"\n"
"If you want to create a DOT file then you should execute the following command:\n"
"airflow dags test <DAG_ID> <EXECUTION_DATE> --save-dagrun output.dot\n"
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_test"),
args=(
ARG_DAG_ID,
ARG_EXECUTION_DATE_OPTIONAL,
ARG_CONF,
ARG_SUBDIR,
ARG_SHOW_DAGRUN,
ARG_IMGCAT_DAGRUN,
ARG_SAVE_DAGRUN,
ARG_VERBOSE,
),
),
ActionCommand(
name="reserialize",
help="Reserialize all DAGs by parsing the DagBag files",
description=(
"Drop all serialized dags from the metadata DB. This will cause all DAGs to be reserialized "
"from the DagBag folder. This can be helpful if your serialized DAGs get out of sync with the "
"version of Airflow that you are running."
),
func=lazy_load_command("airflow.cli.commands.dag_command.dag_reserialize"),
args=(
ARG_CLEAR_ONLY,
ARG_SUBDIR,
ARG_VERBOSE,
),
),
)
TASKS_COMMANDS = (
ActionCommand(
name="list",
help="List the tasks within a DAG",
func=lazy_load_command("airflow.cli.commands.task_command.task_list"),
args=(ARG_DAG_ID, ARG_TREE, ARG_SUBDIR, ARG_VERBOSE),
),
ActionCommand(
name="clear",
help="Clear a set of task instance, as if they never ran",
func=lazy_load_command("airflow.cli.commands.task_command.task_clear"),
args=(
ARG_DAG_ID,
ARG_TASK_REGEX,
ARG_START_DATE,
ARG_END_DATE,
ARG_SUBDIR,
ARG_UPSTREAM,
ARG_DOWNSTREAM,
ARG_YES,
ARG_ONLY_FAILED,
ARG_ONLY_RUNNING,
ARG_EXCLUDE_SUBDAGS,
ARG_EXCLUDE_PARENTDAG,
ARG_DAG_REGEX,
ARG_VERBOSE,
),
),
ActionCommand(
name="state",
help="Get the status of a task instance",
func=lazy_load_command("airflow.cli.commands.task_command.task_state"),
args=(
ARG_DAG_ID,
ARG_TASK_ID,
ARG_EXECUTION_DATE_OR_RUN_ID,
ARG_SUBDIR,
ARG_VERBOSE,
ARG_MAP_INDEX,
),
),
ActionCommand(
name="failed-deps",
help="Returns the unmet dependencies for a task instance",
description=(
"Returns the unmet dependencies for a task instance from the perspective of the scheduler. "
"In other words, why a task instance doesn't get scheduled and then queued by the scheduler, "
"and then run by an executor."
),
func=lazy_load_command("airflow.cli.commands.task_command.task_failed_deps"),
args=(ARG_DAG_ID, ARG_TASK_ID, ARG_EXECUTION_DATE_OR_RUN_ID, ARG_SUBDIR, ARG_MAP_INDEX, ARG_VERBOSE),
),
ActionCommand(
name="render",
help="Render a task instance's template(s)",
func=lazy_load_command("airflow.cli.commands.task_command.task_render"),
args=(
ARG_DAG_ID,
ARG_TASK_ID,
ARG_EXECUTION_DATE_OR_RUN_ID,
ARG_SUBDIR,
ARG_VERBOSE,
ARG_MAP_INDEX,
),
),
ActionCommand(
name="run",
help="Run a single task instance",
func=lazy_load_command("airflow.cli.commands.task_command.task_run"),
args=(
ARG_DAG_ID,
ARG_TASK_ID,
ARG_EXECUTION_DATE_OR_RUN_ID,
ARG_SUBDIR,
ARG_MARK_SUCCESS,
ARG_FORCE,
ARG_POOL,
ARG_CFG_PATH,
ARG_LOCAL,
ARG_RAW,
ARG_IGNORE_ALL_DEPENDENCIES,
ARG_IGNORE_DEPENDENCIES,
ARG_IGNORE_DEPENDS_ON_PAST,
ARG_DEPENDS_ON_PAST,
ARG_SHIP_DAG,
ARG_PICKLE,
ARG_JOB_ID,
ARG_INTERACTIVE,
ARG_SHUT_DOWN_LOGGING,
ARG_MAP_INDEX,
ARG_VERBOSE,
ARG_READ_FROM_DB,
),
),
ActionCommand(
name="test",
help="Test a task instance",
description=(
"Test a task instance. This will run a task without checking for dependencies or recording "
"its state in the database"
),
func=lazy_load_command("airflow.cli.commands.task_command.task_test"),
args=(
ARG_DAG_ID,
ARG_TASK_ID,
ARG_EXECUTION_DATE_OR_RUN_ID_OPTIONAL,
ARG_SUBDIR,
ARG_DRY_RUN,
ARG_TASK_PARAMS,
ARG_POST_MORTEM,
ARG_ENV_VARS,
ARG_MAP_INDEX,
ARG_VERBOSE,
),
),
ActionCommand(
name="states-for-dag-run",
help="Get the status of all task instances in a dag run",
func=lazy_load_command("airflow.cli.commands.task_command.task_states_for_dag_run"),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE_OR_RUN_ID, ARG_OUTPUT, ARG_VERBOSE),
),
)
POOLS_COMMANDS = (
ActionCommand(
name="list",
help="List pools",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="get",
help="Get pool size",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_get"),
args=(ARG_POOL_NAME, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="set",
help="Configure pool",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_set"),
args=(ARG_POOL_NAME, ARG_POOL_SLOTS, ARG_POOL_DESCRIPTION, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="delete",
help="Delete pool",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_delete"),
args=(ARG_POOL_NAME, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="import",
help="Import pools",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_import"),
args=(ARG_POOL_IMPORT, ARG_VERBOSE),
),
ActionCommand(
name="export",
help="Export all pools",
func=lazy_load_command("airflow.cli.commands.pool_command.pool_export"),
args=(ARG_POOL_EXPORT, ARG_VERBOSE),
),
)
VARIABLES_COMMANDS = (
ActionCommand(
name="list",
help="List variables",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="get",
help="Get variable",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_get"),
args=(ARG_VAR, ARG_DESERIALIZE_JSON, ARG_DEFAULT, ARG_VERBOSE),
),
ActionCommand(
name="set",
help="Set variable",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_set"),
args=(ARG_VAR, ARG_VAR_VALUE, ARG_SERIALIZE_JSON, ARG_VERBOSE),
),
ActionCommand(
name="delete",
help="Delete variable",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_delete"),
args=(ARG_VAR, ARG_VERBOSE),
),
ActionCommand(
name="import",
help="Import variables",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_import"),
args=(ARG_VAR_IMPORT, ARG_VERBOSE),
),
ActionCommand(
name="export",
help="Export all variables",
func=lazy_load_command("airflow.cli.commands.variable_command.variables_export"),
args=(ARG_VAR_EXPORT, ARG_VERBOSE),
),
)
DB_COMMANDS = (
ActionCommand(
name="init",
help="Initialize the metadata database",
func=lazy_load_command("airflow.cli.commands.db_command.initdb"),
args=(ARG_VERBOSE,),
),
ActionCommand(
name="check-migrations",
help="Check if migration have finished",
description="Check if migration have finished (or continually check until timeout)",
func=lazy_load_command("airflow.cli.commands.db_command.check_migrations"),
args=(ARG_MIGRATION_TIMEOUT, ARG_VERBOSE),
),
ActionCommand(
name="reset",
help="Burn down and rebuild the metadata database",
func=lazy_load_command("airflow.cli.commands.db_command.resetdb"),
args=(ARG_YES, ARG_DB_SKIP_INIT, ARG_VERBOSE),
),
ActionCommand(
name="upgrade",
help="Upgrade the metadata database to latest version",
description=(
"Upgrade the schema of the metadata database. "
"To print but not execute commands, use option ``--show-sql-only``. "
"If using options ``--from-revision`` or ``--from-version``, you must also use "
"``--show-sql-only``, because if actually *running* migrations, we should only "
"migrate from the *current* Alembic revision."
),
func=lazy_load_command("airflow.cli.commands.db_command.upgradedb"),
args=(
ARG_DB_REVISION__UPGRADE,
ARG_DB_VERSION__UPGRADE,
ARG_DB_SQL_ONLY,
ARG_DB_FROM_REVISION,
ARG_DB_FROM_VERSION,
ARG_DB_RESERIALIZE_DAGS,
ARG_VERBOSE,
),
),
ActionCommand(
name="downgrade",
help="Downgrade the schema of the metadata database.",
description=(
"Downgrade the schema of the metadata database. "
"You must provide either `--to-revision` or `--to-version`. "
"To print but not execute commands, use option `--show-sql-only`. "
"If using options `--from-revision` or `--from-version`, you must also use `--show-sql-only`, "
"because if actually *running* migrations, we should only migrate from the *current* Alembic "
"revision."
),
func=lazy_load_command("airflow.cli.commands.db_command.downgrade"),
args=(
ARG_DB_REVISION__DOWNGRADE,
ARG_DB_VERSION__DOWNGRADE,
ARG_DB_SQL_ONLY,
ARG_YES,
ARG_DB_FROM_REVISION,
ARG_DB_FROM_VERSION,
ARG_VERBOSE,
),
),
ActionCommand(
name="shell",
help="Runs a shell to access the database",
func=lazy_load_command("airflow.cli.commands.db_command.shell"),
args=(ARG_VERBOSE,),
),
ActionCommand(
name="check",
help="Check if the database can be reached",
func=lazy_load_command("airflow.cli.commands.db_command.check"),
args=(ARG_VERBOSE, ARG_DB_RETRY, ARG_DB_RETRY_DELAY),
),
ActionCommand(
name="clean",
help="Purge old records in metastore tables",
func=lazy_load_command("airflow.cli.commands.db_command.cleanup_tables"),
args=(
ARG_DB_TABLES,
ARG_DB_DRY_RUN,
ARG_DB_CLEANUP_TIMESTAMP,
ARG_VERBOSE,
ARG_YES,
ARG_DB_SKIP_ARCHIVE,
),
),
ActionCommand(
name="export-archived",
help="Export archived data from the archive tables",
func=lazy_load_command("airflow.cli.commands.db_command.export_archived"),
args=(
ARG_DB_EXPORT_FORMAT,
ARG_DB_OUTPUT_PATH,
ARG_DB_DROP_ARCHIVES,
ARG_DB_TABLES,
ARG_YES,
),
),
ActionCommand(
name="drop-archived",
help="Drop archived tables created through the db clean command",
func=lazy_load_command("airflow.cli.commands.db_command.drop_archived"),
args=(ARG_DB_TABLES, ARG_YES),
),
)
CONNECTIONS_COMMANDS = (
ActionCommand(
name="get",
help="Get a connection",
func=lazy_load_command("airflow.cli.commands.connection_command.connections_get"),
args=(ARG_CONN_ID, ARG_COLOR, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="list",
help="List connections",
func=lazy_load_command("airflow.cli.commands.connection_command.connections_list"),
args=(ARG_OUTPUT, ARG_VERBOSE, ARG_CONN_ID_FILTER),
),
ActionCommand(
name="add",
help="Add a connection",
func=lazy_load_command("airflow.cli.commands.connection_command.connections_add"),
args=(ARG_CONN_ID, ARG_CONN_URI, ARG_CONN_JSON, ARG_CONN_EXTRA) + tuple(ALTERNATIVE_CONN_SPECS_ARGS),
),
ActionCommand(
name="delete",
help="Delete a connection",
func=lazy_load_command("airflow.cli.commands.connection_command.connections_delete"),
args=(ARG_CONN_ID, ARG_COLOR, ARG_VERBOSE),
),
ActionCommand(
name="export",
help="Export all connections",
description=(
"All connections can be exported in STDOUT using the following command:\n"
"airflow connections export -\n"
"The file format can be determined by the provided file extension. E.g., The following "
"command will export the connections in JSON format:\n"
"airflow connections export /tmp/connections.json\n"
"The --file-format parameter can be used to control the file format. E.g., "
"the default format is JSON in STDOUT mode, which can be overridden using: \n"
"airflow connections export - --file-format yaml\n"
"The --file-format parameter can also be used for the files, for example:\n"
"airflow connections export /tmp/connections --file-format json.\n"
"When exporting in `env` file format, you control whether URI format or JSON format "
"is used to serialize the connection by passing `uri` or `json` with option "
"`--serialization-format`.\n"
),
func=lazy_load_command("airflow.cli.commands.connection_command.connections_export"),
args=(
ARG_CONN_EXPORT,
ARG_CONN_EXPORT_FORMAT,
ARG_CONN_EXPORT_FILE_FORMAT,
ARG_CONN_SERIALIZATION_FORMAT,
ARG_VERBOSE,
),
),
ActionCommand(
name="import",
help="Import connections from a file",
description=(
"Connections can be imported from the output of the export command.\n"
"The filetype must by json, yaml or env and will be automatically inferred."
),
func=lazy_load_command("airflow.cli.commands.connection_command.connections_import"),
args=(
ARG_CONN_IMPORT,
ARG_CONN_OVERWRITE,
ARG_VERBOSE,
),
),
ActionCommand(
name="test",
help="Test a connection",
func=lazy_load_command("airflow.cli.commands.connection_command.connections_test"),
args=(ARG_CONN_ID, ARG_VERBOSE),
),
)
PROVIDERS_COMMANDS = (
ActionCommand(
name="list",
help="List installed providers",
func=lazy_load_command("airflow.cli.commands.provider_command.providers_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="get",
help="Get detailed information about a provider",
func=lazy_load_command("airflow.cli.commands.provider_command.provider_get"),
args=(ARG_OUTPUT, ARG_VERBOSE, ARG_FULL, ARG_COLOR, ARG_PROVIDER_NAME),
),
ActionCommand(
name="links",
help="List extra links registered by the providers",
func=lazy_load_command("airflow.cli.commands.provider_command.extra_links_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="widgets",
help="Get information about registered connection form widgets",
func=lazy_load_command("airflow.cli.commands.provider_command.connection_form_widget_list"),
args=(
ARG_OUTPUT,
ARG_VERBOSE,
),
),
ActionCommand(
name="hooks",
help="List registered provider hooks",
func=lazy_load_command("airflow.cli.commands.provider_command.hooks_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="triggers",
help="List registered provider triggers",
func=lazy_load_command("airflow.cli.commands.provider_command.triggers_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="behaviours",
help="Get information about registered connection types with custom behaviours",
func=lazy_load_command("airflow.cli.commands.provider_command.connection_field_behaviours"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="logging",
help="Get information about task logging handlers provided",
func=lazy_load_command("airflow.cli.commands.provider_command.logging_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="secrets",
help="Get information about secrets backends provided",
func=lazy_load_command("airflow.cli.commands.provider_command.secrets_backends_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="auth",
help="Get information about API auth backends provided",
func=lazy_load_command("airflow.cli.commands.provider_command.auth_backend_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="executors",
help="Get information about executors provided",
func=lazy_load_command("airflow.cli.commands.provider_command.executors_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
)
USERS_COMMANDS = (
ActionCommand(
name="list",
help="List users",
func=lazy_load_command("airflow.cli.commands.user_command.users_list"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="create",
help="Create a user",
func=lazy_load_command("airflow.cli.commands.user_command.users_create"),
args=(
ARG_ROLE,
ARG_USERNAME,
ARG_EMAIL,
ARG_FIRSTNAME,
ARG_LASTNAME,
ARG_PASSWORD,
ARG_USE_RANDOM_PASSWORD,
ARG_VERBOSE,
),
epilog=(
"examples:\n"
'To create a user with "Admin" role and username equal to "admin", run:\n'
"\n"
" $ airflow users create \\\n"
" --username admin \\\n"
" --firstname FIRST_NAME \\\n"
" --lastname LAST_NAME \\\n"
" --role Admin \\\n"
" --email [email protected]"
),
),
ActionCommand(
name="delete",
help="Delete a user",
func=lazy_load_command("airflow.cli.commands.user_command.users_delete"),
args=(ARG_USERNAME_OPTIONAL, ARG_EMAIL_OPTIONAL, ARG_VERBOSE),
),
ActionCommand(
name="add-role",
help="Add role to a user",
func=lazy_load_command("airflow.cli.commands.user_command.add_role"),
args=(ARG_USERNAME_OPTIONAL, ARG_EMAIL_OPTIONAL, ARG_ROLE, ARG_VERBOSE),
),
ActionCommand(
name="remove-role",
help="Remove role from a user",
func=lazy_load_command("airflow.cli.commands.user_command.remove_role"),
args=(ARG_USERNAME_OPTIONAL, ARG_EMAIL_OPTIONAL, ARG_ROLE, ARG_VERBOSE),
),
ActionCommand(
name="import",
help="Import users",
func=lazy_load_command("airflow.cli.commands.user_command.users_import"),
args=(ARG_USER_IMPORT, ARG_VERBOSE),
),
ActionCommand(
name="export",
help="Export all users",
func=lazy_load_command("airflow.cli.commands.user_command.users_export"),
args=(ARG_USER_EXPORT, ARG_VERBOSE),
),
)
ROLES_COMMANDS = (
ActionCommand(
name="list",
help="List roles",
func=lazy_load_command("airflow.cli.commands.role_command.roles_list"),
args=(ARG_PERMISSIONS, ARG_OUTPUT, ARG_VERBOSE),
),
ActionCommand(
name="create",
help="Create role",
func=lazy_load_command("airflow.cli.commands.role_command.roles_create"),
args=(ARG_ROLES, ARG_VERBOSE),
),
ActionCommand(
name="delete",
help="Delete role",
func=lazy_load_command("airflow.cli.commands.role_command.roles_delete"),
args=(ARG_ROLES, ARG_VERBOSE),
),
ActionCommand(
name="add-perms",
help="Add roles permissions",
func=lazy_load_command("airflow.cli.commands.role_command.roles_add_perms"),
args=(ARG_ROLES, ARG_ROLE_RESOURCE, ARG_ROLE_ACTION_REQUIRED, ARG_VERBOSE),
),
ActionCommand(
name="del-perms",
help="Delete roles permissions",
func=lazy_load_command("airflow.cli.commands.role_command.roles_del_perms"),
args=(ARG_ROLES, ARG_ROLE_RESOURCE, ARG_ROLE_ACTION, ARG_VERBOSE),
),
ActionCommand(
name="export",
help="Export roles (without permissions) from db to JSON file",
func=lazy_load_command("airflow.cli.commands.role_command.roles_export"),
args=(ARG_ROLE_EXPORT, ARG_ROLE_EXPORT_FMT, ARG_VERBOSE),
),
ActionCommand(
name="import",
help="Import roles (without permissions) from JSON file to db",
func=lazy_load_command("airflow.cli.commands.role_command.roles_import"),
args=(ARG_ROLE_IMPORT, ARG_VERBOSE),
),
)
CELERY_COMMANDS = (
ActionCommand(
name="worker",
help="Start a Celery worker node",
func=lazy_load_command("airflow.cli.commands.celery_command.worker"),
args=(
ARG_QUEUES,
ARG_CONCURRENCY,
ARG_CELERY_HOSTNAME,
ARG_PID,
ARG_DAEMON,
ARG_UMASK,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_AUTOSCALE,
ARG_SKIP_SERVE_LOGS,
ARG_WITHOUT_MINGLE,
ARG_WITHOUT_GOSSIP,
ARG_VERBOSE,
),
),
ActionCommand(
name="flower",
help="Start a Celery Flower",
func=lazy_load_command("airflow.cli.commands.celery_command.flower"),
args=(
ARG_FLOWER_HOSTNAME,
ARG_FLOWER_PORT,
ARG_FLOWER_CONF,
ARG_FLOWER_URL_PREFIX,
ARG_FLOWER_BASIC_AUTH,
ARG_BROKER_API,
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_VERBOSE,
),
),
ActionCommand(
name="stop",
help="Stop the Celery worker gracefully",
func=lazy_load_command("airflow.cli.commands.celery_command.stop_worker"),
args=(ARG_PID, ARG_VERBOSE),
),
)
CONFIG_COMMANDS = (
ActionCommand(
name="get-value",
help="Print the value of the configuration",
func=lazy_load_command("airflow.cli.commands.config_command.get_value"),
args=(
ARG_SECTION,
ARG_OPTION,
ARG_VERBOSE,
),
),
ActionCommand(
name="list",
help="List options for the configuration",
func=lazy_load_command("airflow.cli.commands.config_command.show_config"),
args=(ARG_OPTIONAL_SECTION, ARG_COLOR, ARG_VERBOSE),
),
)
KUBERNETES_COMMANDS = (
ActionCommand(
name="cleanup-pods",
help=(
"Clean up Kubernetes pods "
"(created by KubernetesExecutor/KubernetesPodOperator) "
"in evicted/failed/succeeded/pending states"
),
func=lazy_load_command("airflow.cli.commands.kubernetes_command.cleanup_pods"),
args=(ARG_NAMESPACE, ARG_MIN_PENDING_MINUTES, ARG_VERBOSE),
),
ActionCommand(
name="generate-dag-yaml",
help="Generate YAML files for all tasks in DAG. Useful for debugging tasks without "
"launching into a cluster",
func=lazy_load_command("airflow.cli.commands.kubernetes_command.generate_pod_yaml"),
args=(ARG_DAG_ID, ARG_EXECUTION_DATE, ARG_SUBDIR, ARG_OUTPUT_PATH, ARG_VERBOSE),
),
)
JOBS_COMMANDS = (
ActionCommand(
name="check",
help="Checks if job(s) are still alive",
func=lazy_load_command("airflow.cli.commands.jobs_command.check"),
args=(
ARG_JOB_TYPE_FILTER,
ARG_JOB_HOSTNAME_FILTER,
ARG_JOB_HOSTNAME_CALLABLE_FILTER,
ARG_JOB_LIMIT,
ARG_ALLOW_MULTIPLE,
ARG_VERBOSE,
),
epilog=(
"examples:\n"
"To check if the local scheduler is still working properly, run:\n"
"\n"
            ' $ airflow jobs check --job-type SchedulerJob --local\n'
"\n"
"To check if any scheduler is running when you are using high availability, run:\n"
"\n"
" $ airflow jobs check --job-type SchedulerJob --allow-multiple --limit 100"
),
),
)
core_commands: list[CLICommand] = [
GroupCommand(
name="dags",
help="Manage DAGs",
subcommands=DAGS_COMMANDS,
),
GroupCommand(
name="kubernetes", help="Tools to help run the KubernetesExecutor", subcommands=KUBERNETES_COMMANDS
),
GroupCommand(
name="tasks",
help="Manage tasks",
subcommands=TASKS_COMMANDS,
),
GroupCommand(
name="pools",
help="Manage pools",
subcommands=POOLS_COMMANDS,
),
GroupCommand(
name="variables",
help="Manage variables",
subcommands=VARIABLES_COMMANDS,
),
GroupCommand(
name="jobs",
help="Manage jobs",
subcommands=JOBS_COMMANDS,
),
GroupCommand(
name="db",
help="Database operations",
subcommands=DB_COMMANDS,
),
ActionCommand(
name="kerberos",
help="Start a kerberos ticket renewer",
func=lazy_load_command("airflow.cli.commands.kerberos_command.kerberos"),
args=(
ARG_PRINCIPAL,
ARG_KEYTAB,
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_VERBOSE,
),
),
ActionCommand(
name="webserver",
help="Start a Airflow webserver instance",
func=lazy_load_command("airflow.cli.commands.webserver_command.webserver"),
args=(
ARG_PORT,
ARG_WORKERS,
ARG_WORKERCLASS,
ARG_WORKER_TIMEOUT,
ARG_HOSTNAME,
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_ACCESS_LOGFILE,
ARG_ERROR_LOGFILE,
ARG_ACCESS_LOGFORMAT,
ARG_LOG_FILE,
ARG_SSL_CERT,
ARG_SSL_KEY,
ARG_DEBUG,
),
),
ActionCommand(
name="scheduler",
help="Start a scheduler instance",
func=lazy_load_command("airflow.cli.commands.scheduler_command.scheduler"),
args=(
ARG_SUBDIR,
ARG_NUM_RUNS,
ARG_DO_PICKLE,
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_SKIP_SERVE_LOGS,
ARG_VERBOSE,
),
epilog=(
"Signals:\n"
"\n"
" - SIGUSR2: Dump a snapshot of task state being tracked by the executor.\n"
"\n"
" Example:\n"
' pkill -f -USR2 "airflow scheduler"'
),
),
ActionCommand(
name="triggerer",
help="Start a triggerer instance",
func=lazy_load_command("airflow.cli.commands.triggerer_command.triggerer"),
args=(
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_CAPACITY,
ARG_VERBOSE,
ARG_SKIP_SERVE_LOGS,
),
),
ActionCommand(
name="dag-processor",
help="Start a standalone Dag Processor instance",
func=lazy_load_command("airflow.cli.commands.dag_processor_command.dag_processor"),
args=(
ARG_PID,
ARG_DAEMON,
ARG_SUBDIR,
ARG_NUM_RUNS,
ARG_DO_PICKLE,
ARG_STDOUT,
ARG_STDERR,
ARG_LOG_FILE,
ARG_VERBOSE,
),
),
ActionCommand(
name="version",
help="Show the version",
func=lazy_load_command("airflow.cli.commands.version_command.version"),
args=(),
),
ActionCommand(
name="cheat-sheet",
help="Display cheat sheet",
func=lazy_load_command("airflow.cli.commands.cheat_sheet_command.cheat_sheet"),
args=(ARG_VERBOSE,),
),
GroupCommand(
name="connections",
help="Manage connections",
subcommands=CONNECTIONS_COMMANDS,
),
GroupCommand(
name="providers",
help="Display providers",
subcommands=PROVIDERS_COMMANDS,
),
GroupCommand(
name="users",
help="Manage users",
subcommands=USERS_COMMANDS,
),
GroupCommand(
name="roles",
help="Manage roles",
subcommands=ROLES_COMMANDS,
),
ActionCommand(
name="sync-perm",
help="Update permissions for existing roles and optionally DAGs",
func=lazy_load_command("airflow.cli.commands.sync_perm_command.sync_perm"),
args=(ARG_INCLUDE_DAGS, ARG_VERBOSE),
),
ActionCommand(
name="rotate-fernet-key",
func=lazy_load_command("airflow.cli.commands.rotate_fernet_key_command.rotate_fernet_key"),
help="Rotate encrypted connection credentials and variables",
description=(
"Rotate all encrypted connection credentials and variables; see "
"https://airflow.apache.org/docs/apache-airflow/stable/howto/secure-connections.html"
"#rotating-encryption-keys"
),
args=(),
),
GroupCommand(name="config", help="View configuration", subcommands=CONFIG_COMMANDS),
ActionCommand(
name="info",
help="Show information about current Airflow and environment",
func=lazy_load_command("airflow.cli.commands.info_command.show_info"),
args=(
ARG_ANONYMIZE,
ARG_FILE_IO,
ARG_VERBOSE,
ARG_OUTPUT,
),
),
ActionCommand(
name="plugins",
help="Dump information about loaded plugins",
func=lazy_load_command("airflow.cli.commands.plugins_command.dump_plugins"),
args=(ARG_OUTPUT, ARG_VERBOSE),
),
GroupCommand(
name="celery",
help="Celery components",
description=(
"Start celery components. Works only when using CeleryExecutor. For more information, see "
"https://airflow.apache.org/docs/apache-airflow/stable/executor/celery.html"
),
subcommands=CELERY_COMMANDS,
),
ActionCommand(
name="standalone",
help="Run an all-in-one copy of Airflow",
func=lazy_load_command("airflow.cli.commands.standalone_command.standalone"),
args=tuple(),
),
]
if _ENABLE_AIP_44:
core_commands.append(
ActionCommand(
name="internal-api",
help="Start a Airflow Internal API instance",
func=lazy_load_command("airflow.cli.commands.internal_api_command.internal_api"),
args=(
ARG_INTERNAL_API_PORT,
ARG_INTERNAL_API_WORKERS,
ARG_INTERNAL_API_WORKERCLASS,
ARG_INTERNAL_API_WORKER_TIMEOUT,
ARG_INTERNAL_API_HOSTNAME,
ARG_PID,
ARG_DAEMON,
ARG_STDOUT,
ARG_STDERR,
ARG_INTERNAL_API_ACCESS_LOGFILE,
ARG_INTERNAL_API_ERROR_LOGFILE,
ARG_INTERNAL_API_ACCESS_LOGFORMAT,
ARG_LOG_FILE,
ARG_SSL_CERT,
ARG_SSL_KEY,
ARG_DEBUG,
),
),
)
def _remove_dag_id_opt(command: ActionCommand):
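    """Build a copy of the command with the DAG id argument removed."""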
cmd = command._asdict()
cmd["args"] = (arg for arg in command.args if arg is not ARG_DAG_ID)
return ActionCommand(**cmd)
dag_cli_commands: list[CLICommand] = [
GroupCommand(
name="dags",
help="Manage DAGs",
subcommands=[
_remove_dag_id_opt(sp)
for sp in DAGS_COMMANDS
if sp.name in ["backfill", "list-runs", "pause", "unpause", "test"]
],
),
GroupCommand(
name="tasks",
help="Manage tasks",
subcommands=[_remove_dag_id_opt(sp) for sp in TASKS_COMMANDS if sp.name in ["list", "test", "run"]],
),
]
DAG_CLI_DICT: dict[str, CLICommand] = {sp.name: sp for sp in dag_cli_commands}
| 79,791 | 33.527045 | 110 | py |
airflow | airflow-main/airflow/cli/cli_parser.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Produce a CLI parser object from Airflow CLI command configuration.
.. seealso:: :mod:`airflow.cli.cli_config`
"""
from __future__ import annotations
import argparse
from argparse import Action
from functools import lru_cache
from typing import Iterable
import lazy_object_proxy
from rich_argparse import RawTextRichHelpFormatter, RichHelpFormatter
from airflow.cli.cli_config import (
DAG_CLI_DICT,
ActionCommand,
Arg,
CLICommand,
DefaultHelpParser,
GroupCommand,
core_commands,
)
from airflow.exceptions import AirflowException
from airflow.utils.helpers import partition
airflow_commands = core_commands
ALL_COMMANDS_DICT: dict[str, CLICommand] = {sp.name: sp for sp in airflow_commands}
class AirflowHelpFormatter(RichHelpFormatter):
"""
Custom help formatter to display help message.
It displays simple commands and groups of commands in separate sections.
"""
def _iter_indented_subactions(self, action: Action):
if isinstance(action, argparse._SubParsersAction):
self._indent()
subactions = action._get_subactions()
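            # Split subcommands into plain commands and command groups so the help
            # output can list them under separate "Groups" and "Commands" sections.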
action_subcommands, group_subcommands = partition(
lambda d: isinstance(ALL_COMMANDS_DICT[d.dest], GroupCommand), subactions
)
yield Action([], "\n%*s%s:" % (self._current_indent, "", "Groups"), nargs=0)
self._indent()
yield from group_subcommands
self._dedent()
yield Action([], "\n%*s%s:" % (self._current_indent, "", "Commands"), nargs=0)
self._indent()
yield from action_subcommands
self._dedent()
self._dedent()
else:
yield from super()._iter_indented_subactions(action)
class LazyRichHelpFormatter(RawTextRichHelpFormatter):
"""
Custom help formatter to display help message.
It resolves lazy help string before printing it using rich.
"""
def add_argument(self, action: Action) -> None:
if isinstance(action.help, lazy_object_proxy.Proxy):
action.help = str(action.help)
return super().add_argument(action)
@lru_cache(maxsize=None)
def get_parser(dag_parser: bool = False) -> argparse.ArgumentParser:
"""Creates and returns command line argument parser."""
parser = DefaultHelpParser(prog="airflow", formatter_class=AirflowHelpFormatter)
subparsers = parser.add_subparsers(dest="subcommand", metavar="GROUP_OR_COMMAND")
subparsers.required = True
command_dict = DAG_CLI_DICT if dag_parser else ALL_COMMANDS_DICT
subparser_list = command_dict.keys()
sub_name: str
for sub_name in sorted(subparser_list):
sub: CLICommand = command_dict[sub_name]
_add_command(subparsers, sub)
return parser
def _sort_args(args: Iterable[Arg]) -> Iterable[Arg]:
"""Sort subcommand optional args, keep positional args."""
def get_long_option(arg: Arg):
"""Get long option from Arg.flags."""
return arg.flags[0] if len(arg.flags) == 1 else arg.flags[1]
positional, optional = partition(lambda x: x.flags[0].startswith("-"), args)
yield from positional
yield from sorted(optional, key=lambda x: get_long_option(x).lower())
def _add_command(subparsers: argparse._SubParsersAction, sub: CLICommand) -> None:
sub_proc = subparsers.add_parser(
sub.name, help=sub.help, description=sub.description or sub.help, epilog=sub.epilog
)
sub_proc.formatter_class = LazyRichHelpFormatter
if isinstance(sub, GroupCommand):
_add_group_command(sub, sub_proc)
elif isinstance(sub, ActionCommand):
_add_action_command(sub, sub_proc)
else:
raise AirflowException("Invalid command definition.")
def _add_action_command(sub: ActionCommand, sub_proc: argparse.ArgumentParser) -> None:
for arg in _sort_args(sub.args):
arg.add_to_parser(sub_proc)
sub_proc.set_defaults(func=sub.func)
def _add_group_command(sub: GroupCommand, sub_proc: argparse.ArgumentParser) -> None:
subcommands = sub.subcommands
sub_subparsers = sub_proc.add_subparsers(dest="subcommand", metavar="COMMAND")
sub_subparsers.required = True
for command in sorted(subcommands, key=lambda x: x.name):
_add_command(sub_subparsers, command)
| 5,114 | 33.560811 | 91 | py |
airflow | airflow-main/airflow/cli/commands/triggerer_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggerer command."""
from __future__ import annotations
import signal
from contextlib import contextmanager
from functools import partial
from multiprocessing import Process
from typing import Generator
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.configuration import conf
from airflow.jobs.job import Job, run_job
from airflow.jobs.triggerer_job_runner import TriggererJobRunner
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler
from airflow.utils.serve_logs import serve_logs
@contextmanager
def _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:
"""Starts serve_logs sub-process."""
sub_proc = None
if skip_serve_logs is False:
port = conf.getint("logging", "trigger_log_server_port", fallback=8794)
sub_proc = Process(target=partial(serve_logs, port=port))
sub_proc.start()
try:
yield
finally:
if sub_proc:
sub_proc.terminate()
@cli_utils.action_cli
def triggerer(args):
"""Starts Airflow Triggerer."""
settings.MASK_SECRETS_IN_LOGS = True
print(settings.HEADER)
triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations(
"triggerer", args.pid, args.stdout, args.stderr, args.log_file
)
handle = setup_logging(log_file)
with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
stdout_handle.truncate(0)
stderr_handle.truncate(0)
daemon_context = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout_handle,
stderr=stderr_handle,
umask=int(settings.DAEMON_UMASK, 8),
)
with daemon_context, _serve_logs(args.skip_serve_logs):
run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
with _serve_logs(args.skip_serve_logs):
run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
| 3,342 | 38.329412 | 109 | py |
airflow | airflow-main/airflow/cli/commands/scheduler_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Scheduler command."""
from __future__ import annotations
import signal
from contextlib import contextmanager
from multiprocessing import Process
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.api_internal.internal_api_call import InternalApiConfig
from airflow.configuration import conf
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.job import Job, run_job
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.utils import cli as cli_utils
from airflow.utils.cli import process_subdir, setup_locations, setup_logging, sigint_handler, sigquit_handler
from airflow.utils.scheduler_health import serve_health_check
def _run_scheduler_job(job_runner: SchedulerJobRunner, *, skip_serve_logs: bool) -> None:
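    """Run the scheduler job, optionally starting the serve-logs and health-check sub-processes."""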
InternalApiConfig.force_database_direct_access()
enable_health_check = conf.getboolean("scheduler", "ENABLE_HEALTH_CHECK")
with _serve_logs(skip_serve_logs), _serve_health_check(enable_health_check):
run_job(job=job_runner.job, execute_callable=job_runner._execute)
@cli_utils.action_cli
def scheduler(args):
"""Starts Airflow Scheduler."""
print(settings.HEADER)
job_runner = SchedulerJobRunner(
job=Job(), subdir=process_subdir(args.subdir), num_runs=args.num_runs, do_pickle=args.do_pickle
)
ExecutorLoader.validate_database_executor_compatibility(job_runner.job.executor)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations(
"scheduler", args.pid, args.stdout, args.stderr, args.log_file
)
handle = setup_logging(log_file)
with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
stdout_handle.truncate(0)
stderr_handle.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout_handle,
stderr=stderr_handle,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
_run_scheduler_job(job_runner, skip_serve_logs=args.skip_serve_logs)
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
_run_scheduler_job(job_runner, skip_serve_logs=args.skip_serve_logs)
@contextmanager
def _serve_logs(skip_serve_logs: bool = False):
"""Starts serve_logs sub-process."""
from airflow.utils.serve_logs import serve_logs
sub_proc = None
executor_class, _ = ExecutorLoader.import_default_executor_cls()
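    # Start the log-serving sub-process only if the configured executor expects
    # the scheduler to serve task logs.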
if executor_class.serve_logs:
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
yield
if sub_proc:
sub_proc.terminate()
@contextmanager
def _serve_health_check(enable_health_check: bool = False):
"""Starts serve_health_check sub-process."""
sub_proc = None
if enable_health_check:
sub_proc = Process(target=serve_health_check)
sub_proc.start()
yield
if sub_proc:
sub_proc.terminate()
| 3,999 | 36.735849 | 109 | py |
airflow | airflow-main/airflow/cli/commands/provider_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Providers sub-commands."""
from __future__ import annotations
import re2
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.utils.cli import suppress_logs_and_warning
ERROR_IMPORTING_HOOK = "Error when importing hook!"
def _remove_rst_syntax(value: str) -> str:
return re2.sub("[`_<>]", "", value.strip(" \n."))
@suppress_logs_and_warning
def provider_get(args):
"""Get a provider info."""
providers = ProvidersManager().providers
if args.provider_name in providers:
provider_version = providers[args.provider_name].version
provider_info = providers[args.provider_name].data
if args.full:
provider_info["description"] = _remove_rst_syntax(provider_info["description"])
AirflowConsole().print_as(
data=[provider_info],
output=args.output,
)
else:
AirflowConsole().print_as(
data=[{"Provider": args.provider_name, "Version": provider_version}], output=args.output
)
else:
raise SystemExit(f"No such provider installed: {args.provider_name}")
@suppress_logs_and_warning
def providers_list(args):
"""Lists all providers at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().providers.values()),
output=args.output,
mapper=lambda x: {
"package_name": x.data["package-name"],
"description": _remove_rst_syntax(x.data["description"]),
"version": x.version,
},
)
@suppress_logs_and_warning
def hooks_list(args):
"""Lists all hooks at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].hook_class_name if x[1] else ERROR_IMPORTING_HOOK,
"conn_id_attribute_name": x[1].connection_id_attribute_name if x[1] else ERROR_IMPORTING_HOOK,
"package_name": x[1].package_name if x[1] else ERROR_IMPORTING_HOOK,
"hook_name": x[1].hook_name if x[1] else ERROR_IMPORTING_HOOK,
},
)
@suppress_logs_and_warning
def triggers_list(args):
AirflowConsole().print_as(
data=ProvidersManager().trigger,
output=args.output,
mapper=lambda x: {
"package_name": x.package_name,
"class": x.trigger_class_name,
"integration_name": x.integration_name,
},
)
@suppress_logs_and_warning
def connection_form_widget_list(args):
"""Lists all custom connection form fields at the command line."""
AirflowConsole().print_as(
data=list(sorted(ProvidersManager().connection_form_widgets.items())),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].hook_class_name,
"package_name": x[1].package_name,
"field_type": x[1].field.field_class.__name__,
},
)
@suppress_logs_and_warning
def connection_field_behaviours(args):
"""Lists field behaviours."""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours.keys()),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
)
@suppress_logs_and_warning
def extra_links_list(args):
"""Lists all extra links at the command line."""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
)
@suppress_logs_and_warning
def logging_list(args):
"""Lists all log task handlers at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().logging_class_names),
output=args.output,
mapper=lambda x: {
"logging_class_name": x,
},
)
@suppress_logs_and_warning
def secrets_backends_list(args):
"""Lists all secrets backends at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().secrets_backend_class_names),
output=args.output,
mapper=lambda x: {
"secrets_backend_class_name": x,
},
)
@suppress_logs_and_warning
def auth_backend_list(args):
"""Lists all API auth backend modules at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().auth_backend_module_names),
output=args.output,
mapper=lambda x: {
"api_auth_backand_module": x,
},
)
@suppress_logs_and_warning
def executors_list(args):
"""Lists all executors at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().executor_class_names),
output=args.output,
mapper=lambda x: {
"executor_class_names": x,
},
)
| 5,781 | 30.769231 | 106 | py |
airflow | airflow-main/airflow/cli/commands/jobs_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import select
from sqlalchemy.orm import Session
from airflow.jobs.job import Job
from airflow.utils.net import get_hostname
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import JobState
@provide_session
def check(args, session: Session = NEW_SESSION) -> None:
"""Checks if job(s) are still alive."""
if args.allow_multiple and not args.limit > 1:
raise SystemExit("To use option --allow-multiple, you must set the limit to a value greater than 1.")
if args.hostname and args.local:
raise SystemExit("You can't use --hostname and --local at the same time")
query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())
if args.job_type:
query = query.where(Job.job_type == args.job_type)
if args.hostname:
query = query.where(Job.hostname == args.hostname)
if args.local:
query = query.where(Job.hostname == get_hostname())
if args.limit > 0:
query = query.limit(args.limit)
alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]
count_alive_jobs = len(alive_jobs)
if count_alive_jobs == 0:
raise SystemExit("No alive jobs found.")
if count_alive_jobs > 1 and not args.allow_multiple:
raise SystemExit(f"Found {count_alive_jobs} alive jobs. Expected only one.")
if count_alive_jobs == 1:
print("Found one alive job.")
else:
print(f"Found {count_alive_jobs} alive jobs.")
| 2,352 | 40.280702 | 109 | py |
airflow | airflow-main/airflow/cli/commands/cheat_sheet_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Iterable
from airflow.cli.cli_parser import ActionCommand, GroupCommand, airflow_commands
from airflow.cli.simple_table import AirflowConsole, SimpleTable
from airflow.utils.cli import suppress_logs_and_warning
@suppress_logs_and_warning
def cheat_sheet(args):
"""Display cheat-sheet."""
display_commands_index()
def display_commands_index():
"""Display list of all commands."""
def display_recursive(
prefix: list[str],
commands: Iterable[GroupCommand | ActionCommand],
help_msg: str | None = None,
):
actions: list[ActionCommand] = []
groups: list[GroupCommand] = []
for command in commands:
if isinstance(command, GroupCommand):
groups.append(command)
else:
actions.append(command)
console = AirflowConsole()
if actions:
table = SimpleTable(title=help_msg or "Miscellaneous commands")
table.add_column(width=40)
table.add_column()
for action_command in sorted(actions, key=lambda d: d.name):
table.add_row(" ".join([*prefix, action_command.name]), action_command.help)
console.print(table)
if groups:
for group_command in sorted(groups, key=lambda d: d.name):
group_prefix = [*prefix, group_command.name]
display_recursive(group_prefix, group_command.subcommands, group_command.help)
display_recursive(["airflow"], airflow_commands)
| 2,357 | 36.428571 | 94 | py |
airflow | airflow-main/airflow/cli/commands/plugins_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import inspect
from typing import Any
from airflow import plugins_manager
from airflow.cli.simple_table import AirflowConsole
from airflow.plugins_manager import PluginsDirectorySource, get_plugin_info
from airflow.utils.cli import suppress_logs_and_warning
def _get_name(class_like_object) -> str:
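    """Return a printable name for a plugin component (class, instance or plain string)."""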
if isinstance(class_like_object, (str, PluginsDirectorySource)):
return str(class_like_object)
if inspect.isclass(class_like_object):
return class_like_object.__name__
return class_like_object.__class__.__name__
def _join_plugins_names(value: list[Any] | Any) -> str:
value = value if isinstance(value, list) else [value]
return ",".join(_get_name(v) for v in value)
@suppress_logs_and_warning
def dump_plugins(args):
"""Dump plugins information."""
plugins_info: list[dict[str, str]] = get_plugin_info()
if not plugins_manager.plugins:
print("No plugins loaded")
return
# Remove empty info
if args.output == "table":
        # We can safely index plugins_info[0] because at least one plugin
        # is loaded at this point
for col in list(plugins_info[0]):
if all(not bool(p[col]) for p in plugins_info):
for plugin in plugins_info:
del plugin[col]
AirflowConsole().print_as(plugins_info, output=args.output)
| 2,185 | 36.050847 | 75 | py |
airflow | airflow-main/airflow/cli/commands/connection_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Connection sub-commands."""
from __future__ import annotations
import io
import json
import os
import sys
import warnings
from pathlib import Path
from typing import Any
from urllib.parse import urlsplit, urlunsplit
from sqlalchemy import select
from sqlalchemy.orm import exc
from airflow.cli.simple_table import AirflowConsole
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.exceptions import AirflowNotFoundException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers_manager import ProvidersManager
from airflow.secrets.local_filesystem import load_connections_dict
from airflow.utils import cli as cli_utils, helpers, yaml
from airflow.utils.cli import suppress_logs_and_warning
from airflow.utils.session import create_session
def _connection_mapper(conn: Connection) -> dict[str, Any]:
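    """Map a Connection object to a plain dict used for console output."""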
return {
"id": conn.id,
"conn_id": conn.conn_id,
"conn_type": conn.conn_type,
"description": conn.description,
"host": conn.host,
"schema": conn.schema,
"login": conn.login,
"password": conn.password,
"port": conn.port,
"is_encrypted": conn.is_encrypted,
"is_extra_encrypted": conn.is_encrypted,
"extra_dejson": conn.extra_dejson,
"get_uri": conn.get_uri(),
}
@suppress_logs_and_warning
def connections_get(args):
"""Get a connection."""
try:
conn = BaseHook.get_connection(args.conn_id)
except AirflowNotFoundException:
raise SystemExit("Connection not found.")
AirflowConsole().print_as(
data=[conn],
output=args.output,
mapper=_connection_mapper,
)
@suppress_logs_and_warning
def connections_list(args):
"""Lists all connections at the command line."""
with create_session() as session:
query = select(Connection)
if args.conn_id:
query = query.where(Connection.conn_id == args.conn_id)
query = session.scalars(query)
conns = query.all()
AirflowConsole().print_as(
data=conns,
output=args.output,
mapper=_connection_mapper,
)
def _connection_to_dict(conn: Connection) -> dict:
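    """Convert a Connection to a plain dict used when exporting connections."""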
return dict(
conn_type=conn.conn_type,
description=conn.description,
login=conn.login,
password=conn.password,
host=conn.host,
port=conn.port,
schema=conn.schema,
extra=conn.extra,
)
def _format_connections(conns: list[Connection], file_format: str, serialization_format: str) -> str:
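    """Serialize connections into the requested file format (.env, .json or .yaml)."""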
if serialization_format == "json":
serializer_func = lambda x: json.dumps(_connection_to_dict(x))
elif serialization_format == "uri":
serializer_func = Connection.get_uri
else:
raise SystemExit(f"Received unexpected value for `--serialization-format`: {serialization_format!r}")
if file_format == ".env":
connections_env = ""
for conn in conns:
connections_env += f"{conn.conn_id}={serializer_func(conn)}\n"
return connections_env
connections_dict = {}
for conn in conns:
connections_dict[conn.conn_id] = _connection_to_dict(conn)
if file_format == ".yaml":
return yaml.dump(connections_dict)
if file_format == ".json":
return json.dumps(connections_dict, indent=2)
return json.dumps(connections_dict)
def _is_stdout(fileio: io.TextIOWrapper) -> bool:
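    """Check whether the given file object is standard output."""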
return fileio.name == "<stdout>"
def _valid_uri(uri: str) -> bool:
"""Check if a URI is valid, by checking if scheme (conn_type) provided."""
return urlsplit(uri).scheme != ""
@cache
def _get_connection_types() -> list[str]:
"""Returns connection types available."""
_connection_types = ["fs", "mesos_framework-id", "email", "generic"]
providers_manager = ProvidersManager()
for connection_type, provider_info in providers_manager.hooks.items():
if provider_info:
_connection_types.append(connection_type)
return _connection_types
def connections_export(args):
"""Exports all connections to a file."""
file_formats = [".yaml", ".json", ".env"]
if args.format:
warnings.warn("Option `--format` is deprecated. Use `--file-format` instead.", DeprecationWarning)
if args.format and args.file_format:
raise SystemExit("Option `--format` is deprecated. Use `--file-format` instead.")
default_format = ".json"
provided_file_format = None
if args.format or args.file_format:
provided_file_format = f".{(args.format or args.file_format).lower()}"
file_is_stdout = _is_stdout(args.file)
if file_is_stdout:
filetype = provided_file_format or default_format
elif provided_file_format:
filetype = provided_file_format
else:
filetype = Path(args.file.name).suffix
filetype = filetype.lower()
if filetype not in file_formats:
raise SystemExit(
f"Unsupported file format. The file must have the extension {', '.join(file_formats)}."
)
if args.serialization_format and not filetype == ".env":
raise SystemExit("Option `--serialization-format` may only be used with file type `env`.")
with create_session() as session:
connections = session.scalars(select(Connection).order_by(Connection.conn_id)).all()
msg = _format_connections(
conns=connections,
file_format=filetype,
serialization_format=args.serialization_format or "uri",
)
with args.file as f:
f.write(msg)
if file_is_stdout:
print("\nConnections successfully exported.", file=sys.stderr)
else:
print(f"Connections successfully exported to {args.file.name}.")
alternative_conn_specs = ["conn_type", "conn_host", "conn_login", "conn_password", "conn_schema", "conn_port"]
@cli_utils.action_cli
def connections_add(args):
"""Adds new connection."""
has_uri = bool(args.conn_uri)
has_json = bool(args.conn_json)
has_type = bool(args.conn_type)
# Validate connection-id
try:
helpers.validate_key(args.conn_id, max_length=200)
except Exception as e:
raise SystemExit(f"Could not create connection. {e}")
if not has_type and not (has_json or has_uri):
raise SystemExit("Must supply either conn-uri or conn-json if not supplying conn-type")
if has_json and has_uri:
raise SystemExit("Cannot supply both conn-uri and conn-json")
if has_type and args.conn_type not in _get_connection_types():
warnings.warn(f"The type provided to --conn-type is invalid: {args.conn_type}")
warnings.warn(
f"Supported --conn-types are:{_get_connection_types()}."
"Hence overriding the conn-type with generic"
)
args.conn_type = "generic"
if has_uri or has_json:
invalid_args = []
if has_uri and not _valid_uri(args.conn_uri):
raise SystemExit(f"The URI provided to --conn-uri is invalid: {args.conn_uri}")
for arg in alternative_conn_specs:
if getattr(args, arg) is not None:
invalid_args.append(arg)
if has_json and args.conn_extra:
invalid_args.append("--conn-extra")
if invalid_args:
raise SystemExit(
"The following args are not compatible with "
f"the --conn-{'uri' if has_uri else 'json'} flag: {invalid_args!r}"
)
if args.conn_uri:
new_conn = Connection(conn_id=args.conn_id, description=args.conn_description, uri=args.conn_uri)
if args.conn_extra is not None:
new_conn.set_extra(args.conn_extra)
elif args.conn_json:
new_conn = Connection.from_json(conn_id=args.conn_id, value=args.conn_json)
if not new_conn.conn_type:
raise SystemExit("conn-json is invalid; must supply conn-type")
else:
new_conn = Connection(
conn_id=args.conn_id,
conn_type=args.conn_type,
description=args.conn_description,
host=args.conn_host,
login=args.conn_login,
password=args.conn_password,
schema=args.conn_schema,
port=args.conn_port,
)
if args.conn_extra is not None:
new_conn.set_extra(args.conn_extra)
with create_session() as session:
if not session.scalar(select(Connection).where(Connection.conn_id == new_conn.conn_id).limit(1)):
session.add(new_conn)
msg = "Successfully added `conn_id`={conn_id} : {uri}"
msg = msg.format(
conn_id=new_conn.conn_id,
uri=args.conn_uri
or urlunsplit(
(
new_conn.conn_type,
f"{new_conn.login or ''}:{'******' if new_conn.password else ''}"
f"@{new_conn.host or ''}:{new_conn.port or ''}",
new_conn.schema or "",
"",
"",
)
),
)
print(msg)
else:
msg = f"A connection with `conn_id`={new_conn.conn_id} already exists."
raise SystemExit(msg)
@cli_utils.action_cli
def connections_delete(args):
"""Deletes connection from DB."""
with create_session() as session:
try:
to_delete = session.scalars(select(Connection).where(Connection.conn_id == args.conn_id)).one()
except exc.NoResultFound:
raise SystemExit(f"Did not find a connection with `conn_id`={args.conn_id}")
except exc.MultipleResultsFound:
raise SystemExit(f"Found more than one connection with `conn_id`={args.conn_id}")
else:
session.delete(to_delete)
print(f"Successfully deleted connection with `conn_id`={to_delete.conn_id}")
@cli_utils.action_cli(check_db=False)
def connections_import(args):
"""Imports connections from a file."""
if os.path.exists(args.file):
_import_helper(args.file, args.overwrite)
else:
raise SystemExit("Missing connections file.")
def _import_helper(file_path: str, overwrite: bool) -> None:
"""Load connections from a file and save them to the DB.
:param overwrite: Whether to skip or overwrite on collision.
"""
connections_dict = load_connections_dict(file_path)
with create_session() as session:
for conn_id, conn in connections_dict.items():
try:
helpers.validate_key(conn_id, max_length=200)
except Exception as e:
print(f"Could not import connection. {e}")
continue
existing_conn_id = session.scalar(select(Connection.id).where(Connection.conn_id == conn_id))
if existing_conn_id is not None:
if not overwrite:
print(f"Could not import connection {conn_id}: connection already exists.")
continue
# The conn_ids match, but the PK of the new entry must also be the same as the old
conn.id = existing_conn_id
session.merge(conn)
session.commit()
print(f"Imported connection {conn_id}")
@suppress_logs_and_warning
def connections_test(args) -> None:
"""Test an Airflow connection."""
console = AirflowConsole()
if conf.get("core", "test_connection", fallback="Disabled").lower().strip() != "enabled":
console.print(
"[bold yellow]\nTesting connections is disabled in Airflow configuration. "
"Contact your deployment admin to enable it.\n"
)
raise SystemExit(1)
print(f"Retrieving connection: {args.conn_id!r}")
try:
conn = BaseHook.get_connection(args.conn_id)
except AirflowNotFoundException:
console.print("[bold yellow]\nConnection not found.\n")
raise SystemExit(1)
print("\nTesting...")
status, message = conn.test_connection()
if status is True:
console.print("[bold green]\nConnection success!\n")
else:
console.print(f"[bold][red]\nConnection failed![/bold]\n{message}\n")
| 13,065 | 34.409214 | 110 | py |
airflow | airflow-main/airflow/cli/commands/legacy_commands.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from argparse import ArgumentError
COMMAND_MAP = {
"worker": "celery worker",
"flower": "celery flower",
"trigger_dag": "dags trigger",
"delete_dag": "dags delete",
"show_dag": "dags show",
"list_dag": "dags list",
"dag_status": "dags status",
"backfill": "dags backfill",
"list_dag_runs": "dags list-runs",
"pause": "dags pause",
"unpause": "dags unpause",
"test": "tasks test",
"clear": "tasks clear",
"list_tasks": "tasks list",
"task_failed_deps": "tasks failed-deps",
"task_state": "tasks state",
"run": "tasks run",
"render": "tasks render",
"initdb": "db init",
"resetdb": "db reset",
"upgradedb": "db upgrade",
"checkdb": "db check",
"shell": "db shell",
"pool": "pools",
"list_users": "users list",
"create_user": "users create",
"delete_user": "users delete",
}
def check_legacy_command(action, value):
"""Checks command value and raise error if value is in removed command."""
new_command = COMMAND_MAP.get(value)
if new_command is not None:
msg = f"`airflow {value}` command, has been removed, please use `airflow {new_command}`"
raise ArgumentError(action, msg)
| 2,035 | 34.103448 | 96 | py |
airflow | airflow-main/airflow/cli/commands/dag_processor_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DagProcessor command."""
from __future__ import annotations
import logging
from datetime import timedelta
from typing import Any
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.configuration import conf
from airflow.dag_processing.manager import DagFileProcessorManager
from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner
from airflow.jobs.job import Job, run_job
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
log = logging.getLogger(__name__)
def _create_dag_processor_job_runner(args: Any) -> DagProcessorJobRunner:
"""Creates DagFileProcessorProcess instance."""
processor_timeout_seconds: int = conf.getint("core", "dag_file_processor_timeout")
processor_timeout = timedelta(seconds=processor_timeout_seconds)
return DagProcessorJobRunner(
job=Job(),
processor=DagFileProcessorManager(
processor_timeout=processor_timeout,
dag_directory=args.subdir,
max_runs=args.num_runs,
dag_ids=[],
pickle_dags=args.do_pickle,
),
)
@cli_utils.action_cli
def dag_processor(args):
"""Starts Airflow Dag Processor Job."""
if not conf.getboolean("scheduler", "standalone_dag_processor"):
raise SystemExit("The option [scheduler/standalone_dag_processor] must be True.")
sql_conn: str = conf.get("database", "sql_alchemy_conn").lower()
if sql_conn.startswith("sqlite"):
raise SystemExit("Standalone DagProcessor is not supported when using sqlite.")
job_runner = _create_dag_processor_job_runner(args)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations(
"dag-processor", args.pid, args.stdout, args.stderr, args.log_file
)
handle = setup_logging(log_file)
with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
stdout_handle.truncate(0)
stderr_handle.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout_handle,
stderr=stderr_handle,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
run_job(job=job_runner.job, execute_callable=job_runner._execute)
else:
run_job(job=job_runner.job, execute_callable=job_runner._execute)
| 3,294 | 36.873563 | 89 | py |
airflow | airflow-main/airflow/cli/commands/info_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Config sub-commands."""
from __future__ import annotations
import locale
import logging
import os
import platform
import subprocess
import sys
from enum import Enum
from urllib.parse import urlsplit, urlunsplit
import httpx
import tenacity
from airflow import configuration
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.typing_compat import Protocol
from airflow.utils.cli import suppress_logs_and_warning
from airflow.utils.platform import getuser
from airflow.version import version as airflow_version
log = logging.getLogger(__name__)
class Anonymizer(Protocol):
"""Anonymizer protocol."""
def process_path(self, value) -> str:
"""Remove pii from paths."""
def process_username(self, value) -> str:
"""Remove pii from username."""
def process_url(self, value) -> str:
"""Remove pii from URL."""
class NullAnonymizer(Anonymizer):
"""Do nothing."""
def _identity(self, value) -> str:
return value
process_path = process_username = process_url = _identity
del _identity
class PiiAnonymizer(Anonymizer):
"""Remove personally identifiable info from path."""
def __init__(self):
home_path = os.path.expanduser("~")
username = getuser()
self._path_replacements = {home_path: "${HOME}", username: "${USER}"}
def process_path(self, value) -> str:
if not value:
return value
for src, target in self._path_replacements.items():
value = value.replace(src, target)
return value
def process_username(self, value) -> str:
if not value:
return value
return value[0] + "..." + value[-1]
def process_url(self, value) -> str:
if not value:
return value
url_parts = urlsplit(value)
netloc = None
if url_parts.netloc:
# unpack
userinfo = None
username = None
password = None
if "@" in url_parts.netloc:
userinfo, _, host = url_parts.netloc.partition("@")
else:
host = url_parts.netloc
if userinfo:
if ":" in userinfo:
username, _, password = userinfo.partition(":")
else:
username = userinfo
# anonymize
username = self.process_username(username) if username else None
password = "PASSWORD" if password else None
# pack
if username and password and host:
netloc = username + ":" + password + "@" + host
elif username and host:
netloc = username + "@" + host
elif password and host:
netloc = ":" + password + "@" + host
elif host:
netloc = host
else:
netloc = ""
return urlunsplit((url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment))
class OperatingSystem(Enum):
"""Operating system."""
WINDOWS = "Windows"
LINUX = "Linux"
MACOSX = "Mac OS"
CYGWIN = "Cygwin"
UNKNOWN = "Unknown"
@staticmethod
def get_current() -> OperatingSystem:
"""Get current operating system."""
if os.name == "nt":
return OperatingSystem.WINDOWS
elif "linux" in sys.platform:
return OperatingSystem.LINUX
elif "darwin" in sys.platform:
return OperatingSystem.MACOSX
elif "cygwin" in sys.platform:
return OperatingSystem.CYGWIN
return OperatingSystem.UNKNOWN
class Architecture(Enum):
"""Compute architecture."""
X86_64 = "x86_64"
X86 = "x86"
PPC = "ppc"
ARM = "arm"
UNKNOWN = "unknown"
@staticmethod
def get_current() -> Architecture:
"""Get architecture."""
current_architecture = _MACHINE_TO_ARCHITECTURE.get(platform.machine().lower())
return current_architecture if current_architecture else Architecture.UNKNOWN
_MACHINE_TO_ARCHITECTURE: dict[str, Architecture] = {
"amd64": Architecture.X86_64,
"x86_64": Architecture.X86_64,
"i686-64": Architecture.X86_64,
"i386": Architecture.X86,
"i686": Architecture.X86,
"x86": Architecture.X86,
"ia64": Architecture.X86, # Itanium is different x64 arch, treat it as the common x86.
"powerpc": Architecture.PPC,
"power macintosh": Architecture.PPC,
"ppc64": Architecture.PPC,
"armv6": Architecture.ARM,
"armv6l": Architecture.ARM,
"arm64": Architecture.ARM,
"armv7": Architecture.ARM,
"armv7l": Architecture.ARM,
"aarch64": Architecture.ARM,
}
class AirflowInfo:
"""Renders information about Airflow instance."""
def __init__(self, anonymizer):
self.anonymizer = anonymizer
@staticmethod
def _get_version(cmd: list[str], grep: bytes | None = None):
"""Return tools version."""
try:
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
stdoutdata, _ = proc.communicate()
data = [f for f in stdoutdata.split(b"\n") if f]
if grep:
data = [line for line in data if grep in line]
if len(data) != 1:
return "NOT AVAILABLE"
else:
return data[0].decode()
except OSError:
return "NOT AVAILABLE"
@staticmethod
def _task_logging_handler():
"""Returns task logging handler."""
def get_fullname(o):
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__ # Avoid reporting __builtin__
else:
return module + "." + o.__class__.__name__
try:
handler_names = [get_fullname(handler) for handler in logging.getLogger("airflow.task").handlers]
return ", ".join(handler_names)
except Exception:
return "NOT AVAILABLE"
@property
def _airflow_info(self):
executor = configuration.conf.get("core", "executor")
sql_alchemy_conn = self.anonymizer.process_url(
configuration.conf.get("database", "SQL_ALCHEMY_CONN", fallback="NOT AVAILABLE")
)
dags_folder = self.anonymizer.process_path(
configuration.conf.get("core", "dags_folder", fallback="NOT AVAILABLE")
)
plugins_folder = self.anonymizer.process_path(
configuration.conf.get("core", "plugins_folder", fallback="NOT AVAILABLE")
)
base_log_folder = self.anonymizer.process_path(
configuration.conf.get("logging", "base_log_folder", fallback="NOT AVAILABLE")
)
remote_base_log_folder = self.anonymizer.process_path(
configuration.conf.get("logging", "remote_base_log_folder", fallback="NOT AVAILABLE")
)
return [
("version", airflow_version),
("executor", executor),
("task_logging_handler", self._task_logging_handler()),
("sql_alchemy_conn", sql_alchemy_conn),
("dags_folder", dags_folder),
("plugins_folder", plugins_folder),
("base_log_folder", base_log_folder),
("remote_base_log_folder", remote_base_log_folder),
]
@property
def _system_info(self):
operating_system = OperatingSystem.get_current()
arch = Architecture.get_current()
uname = platform.uname()
_locale = locale.getdefaultlocale()
python_location = self.anonymizer.process_path(sys.executable)
python_version = sys.version.replace("\n", " ")
return [
("OS", operating_system.value),
("architecture", arch.value),
("uname", str(uname)),
("locale", str(_locale)),
("python_version", python_version),
("python_location", python_location),
]
@property
def _tools_info(self):
git_version = self._get_version(["git", "--version"])
ssh_version = self._get_version(["ssh", "-V"])
kubectl_version = self._get_version(["kubectl", "version", "--short=True", "--client=True"])
gcloud_version = self._get_version(["gcloud", "version"], grep=b"Google Cloud SDK")
cloud_sql_proxy_version = self._get_version(["cloud_sql_proxy", "--version"])
mysql_version = self._get_version(["mysql", "--version"])
sqlite3_version = self._get_version(["sqlite3", "--version"])
psql_version = self._get_version(["psql", "--version"])
return [
("git", git_version),
("ssh", ssh_version),
("kubectl", kubectl_version),
("gcloud", gcloud_version),
("cloud_sql_proxy", cloud_sql_proxy_version),
("mysql", mysql_version),
("sqlite3", sqlite3_version),
("psql", psql_version),
]
@property
def _paths_info(self):
system_path = os.environ.get("PATH", "").split(os.pathsep)
airflow_home = self.anonymizer.process_path(configuration.get_airflow_home())
system_path = [self.anonymizer.process_path(p) for p in system_path]
python_path = [self.anonymizer.process_path(p) for p in sys.path]
airflow_on_path = any(os.path.exists(os.path.join(path_elem, "airflow")) for path_elem in system_path)
return [
("airflow_home", airflow_home),
("system_path", os.pathsep.join(system_path)),
("python_path", os.pathsep.join(python_path)),
("airflow_on_path", str(airflow_on_path)),
]
@property
def _providers_info(self):
return [(p.data["package-name"], p.version) for p in ProvidersManager().providers.values()]
def show(self, output: str, console: AirflowConsole | None = None) -> None:
"""Shows information about Airflow instance."""
all_info = {
"Apache Airflow": self._airflow_info,
"System info": self._system_info,
"Tools info": self._tools_info,
"Paths info": self._paths_info,
"Providers info": self._providers_info,
}
console = console or AirflowConsole(show_header=False)
if output in ("table", "plain"):
# Show each info as table with key, value column
for key, info in all_info.items():
console.print(f"\n[bold][green]{key}[/bold][/green]", highlight=False)
console.print_as(data=[{"key": k, "value": v} for k, v in info], output=output)
else:
# Render info in given format, change keys to snake_case
console.print_as(
data=[{k.lower().replace(" ", "_"): dict(v)} for k, v in all_info.items()], output=output
)
def render_text(self, output: str) -> str:
"""Exports the info to string."""
console = AirflowConsole(record=True)
with console.capture():
self.show(output=output, console=console)
return console.export_text()
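# Example usage (sketch; assumes the NullAnonymizer defined earlier in this module):
#   info = AirflowInfo(NullAnonymizer())
#   print(info.render_text("table"))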
class FileIoException(Exception):
"""Raises when error happens in FileIo.io integration."""
@tenacity.retry(
stop=tenacity.stop_after_attempt(5),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(FileIoException),
before=tenacity.before_log(log, logging.DEBUG),
after=tenacity.after_log(log, logging.DEBUG),
)
def _upload_text_to_fileio(content):
"""Upload text file to File.io service and return link."""
resp = httpx.post("https://file.io", content=content)
if resp.status_code not in [200, 201]:
print(resp.json())
raise FileIoException("Failed to send report to file.io service.")
try:
return resp.json()["link"]
except ValueError as e:
log.debug(e)
raise FileIoException("Failed to send report to file.io service.")
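# Note: the tenacity decorator above retries the upload up to 5 times with exponential
# backoff (capped at 10 seconds between attempts) whenever FileIoException is raised.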
def _send_report_to_fileio(info):
print("Uploading report to file.io service.")
try:
link = _upload_text_to_fileio(str(info))
print("Report uploaded.")
print(link)
print()
except FileIoException as ex:
print(str(ex))
@suppress_logs_and_warning
def show_info(args):
"""Show information related to Airflow, system and other."""
    # Enforce anonymization when file.io upload is turned on.
anonymizer = PiiAnonymizer() if args.anonymize or args.file_io else NullAnonymizer()
info = AirflowInfo(anonymizer)
if args.file_io:
_send_report_to_fileio(info.render_text(args.output))
else:
info.show(args.output)
| 13,569 | 33.794872 | 110 | py |
airflow | airflow-main/airflow/cli/commands/webserver_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Webserver command."""
from __future__ import annotations
import logging
import os
import signal
import subprocess
import sys
import textwrap
import time
import types
from contextlib import suppress
from time import sleep
from typing import NoReturn
import daemon
import psutil
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowWebServerTimeout
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.hashlib_wrapper import md5
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.process_utils import check_if_pidfile_process_is_running
log = logging.getLogger(__name__)
class GunicornMonitor(LoggingMixin):
"""
    Runs forever.
    Monitors the child processes of @gunicorn_master_proc and restarts
    workers occasionally or when files in the plug-in directory have been modified.
Each iteration of the loop traverses one edge of this state transition
diagram, where each state (node) represents
[ num_ready_workers_running / num_workers_running ]. We expect most time to
be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.
The horizontal transition at ? happens after the new worker parses all the
dags (so it could take a while!)
V ────────────────────────────────────────────────────────────────────────┐
[n / n] ──TTIN──> [ [n, n+bs) / n + bs ] ────?───> [n + bs / n + bs] ──TTOU─┘
^ ^───────────────┘
│
│ ┌────────────────v
└──────┴────── [ [0, n) / n ] <─── start
We change the number of workers by sending TTIN and TTOU to the gunicorn
master process, which increases and decreases the number of child workers
respectively. Gunicorn guarantees that on TTOU workers are terminated
gracefully and that the oldest worker is terminated.
:param gunicorn_master_pid: PID for the main Gunicorn process
:param num_workers_expected: Number of workers to run the Gunicorn web server
:param master_timeout: Number of seconds the webserver waits before killing gunicorn master that
doesn't respond
:param worker_refresh_interval: Number of seconds to wait before refreshing a batch of workers.
:param worker_refresh_batch_size: Number of workers to refresh at a time. When set to 0, worker
refresh is disabled. When nonzero, airflow periodically refreshes webserver workers by
bringing up new ones and killing old ones.
:param reload_on_plugin_change: If set to True, Airflow will track files in plugins_folder directory.
        When it detects changes, it reloads gunicorn.
"""
def __init__(
self,
gunicorn_master_pid: int,
num_workers_expected: int,
master_timeout: int,
worker_refresh_interval: int,
worker_refresh_batch_size: int,
reload_on_plugin_change: bool,
):
super().__init__()
self.gunicorn_master_proc = psutil.Process(gunicorn_master_pid)
self.num_workers_expected = num_workers_expected
self.master_timeout = master_timeout
self.worker_refresh_interval = worker_refresh_interval
self.worker_refresh_batch_size = worker_refresh_batch_size
self.reload_on_plugin_change = reload_on_plugin_change
self._num_workers_running = 0
self._num_ready_workers_running = 0
self._last_refresh_time = time.monotonic() if worker_refresh_interval > 0 else None
self._last_plugin_state = self._generate_plugin_state() if reload_on_plugin_change else None
self._restart_on_next_plugin_check = False
def _generate_plugin_state(self) -> dict[str, float]:
"""
Get plugin states.
Generate dict of filenames and last modification time of all files in settings.PLUGINS_FOLDER
directory.
"""
if not settings.PLUGINS_FOLDER:
return {}
all_filenames: list[str] = []
for (root, _, filenames) in os.walk(settings.PLUGINS_FOLDER):
all_filenames.extend(os.path.join(root, f) for f in filenames)
plugin_state = {f: self._get_file_hash(f) for f in sorted(all_filenames)}
return plugin_state
@staticmethod
def _get_file_hash(fname: str):
"""Calculate MD5 hash for file."""
hash_md5 = md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _get_num_ready_workers_running(self) -> int:
"""Returns number of ready Gunicorn workers by looking for READY_PREFIX in process name."""
workers = psutil.Process(self.gunicorn_master_proc.pid).children()
def ready_prefix_on_cmdline(proc):
try:
cmdline = proc.cmdline()
if len(cmdline) > 0:
return settings.GUNICORN_WORKER_READY_PREFIX in cmdline[0]
except psutil.NoSuchProcess:
pass
return False
ready_workers = [proc for proc in workers if ready_prefix_on_cmdline(proc)]
return len(ready_workers)
def _get_num_workers_running(self) -> int:
"""Returns number of running Gunicorn workers processes."""
workers = psutil.Process(self.gunicorn_master_proc.pid).children()
return len(workers)
def _wait_until_true(self, fn, timeout: int = 0) -> None:
"""Sleeps until fn is true."""
start_time = time.monotonic()
while not fn():
if 0 < timeout <= time.monotonic() - start_time:
raise AirflowWebServerTimeout(f"No response from gunicorn master within {timeout} seconds")
sleep(0.1)
def _spawn_new_workers(self, count: int) -> None:
"""
        Send signal to spawn new workers.
:param count: The number of workers to spawn
"""
excess = 0
for _ in range(count):
# TTIN: Increment the number of processes by one
self.gunicorn_master_proc.send_signal(signal.SIGTTIN)
excess += 1
self._wait_until_true(
lambda: self.num_workers_expected + excess == self._get_num_workers_running(),
timeout=self.master_timeout,
)
def _kill_old_workers(self, count: int) -> None:
"""
Send signal to kill the worker.
:param count: The number of workers to kill
"""
for _ in range(count):
count -= 1
# TTOU: Decrement the number of processes by one
self.gunicorn_master_proc.send_signal(signal.SIGTTOU)
self._wait_until_true(
lambda: self.num_workers_expected + count == self._get_num_workers_running(),
timeout=self.master_timeout,
)
def _reload_gunicorn(self) -> None:
"""
Send signal to reload the gunicorn configuration.
        When gunicorn receives the signal, it reloads the configuration,
        starts new worker processes with the new configuration and gracefully
        shuts down older workers.
"""
# HUP: Reload the configuration.
self.gunicorn_master_proc.send_signal(signal.SIGHUP)
sleep(1)
self._wait_until_true(
lambda: self.num_workers_expected == self._get_num_workers_running(), timeout=self.master_timeout
)
def start(self) -> NoReturn:
"""Starts monitoring the webserver."""
try:
self._wait_until_true(
lambda: self.num_workers_expected == self._get_num_workers_running(),
timeout=self.master_timeout,
)
while True:
if not self.gunicorn_master_proc.is_running():
sys.exit(1)
self._check_workers()
# Throttle loop
sleep(1)
except (AirflowWebServerTimeout, OSError) as err:
self.log.error(err)
self.log.error("Shutting down webserver")
try:
self.gunicorn_master_proc.terminate()
self.gunicorn_master_proc.wait()
finally:
sys.exit(1)
def _check_workers(self) -> None:
num_workers_running = self._get_num_workers_running()
num_ready_workers_running = self._get_num_ready_workers_running()
# Whenever some workers are not ready, wait until all workers are ready
if num_ready_workers_running < num_workers_running:
self.log.debug(
"[%d / %d] Some workers are starting up, waiting...",
num_ready_workers_running,
num_workers_running,
)
sleep(1)
return
# If there are too many workers, then kill a worker gracefully by asking gunicorn to reduce
# number of workers
if num_workers_running > self.num_workers_expected:
excess = min(num_workers_running - self.num_workers_expected, self.worker_refresh_batch_size)
self.log.debug(
"[%d / %d] Killing %s workers", num_ready_workers_running, num_workers_running, excess
)
self._kill_old_workers(excess)
return
# If there are too few workers, start a new worker by asking gunicorn
# to increase number of workers
if num_workers_running < self.num_workers_expected:
self.log.error(
"[%d / %d] Some workers seem to have died and gunicorn did not restart them as expected",
num_ready_workers_running,
num_workers_running,
)
sleep(10)
num_workers_running = self._get_num_workers_running()
if num_workers_running < self.num_workers_expected:
new_worker_count = min(
self.num_workers_expected - num_workers_running, self.worker_refresh_batch_size
)
                # log at info since we are trying to fix an error logged just above
self.log.info(
"[%d / %d] Spawning %d workers",
num_ready_workers_running,
num_workers_running,
new_worker_count,
)
self._spawn_new_workers(new_worker_count)
return
# Now the number of running and expected worker should be equal
# If workers should be restarted periodically.
if self.worker_refresh_interval > 0 and self._last_refresh_time:
# and we refreshed the workers a long time ago, refresh the workers
last_refresh_diff = time.monotonic() - self._last_refresh_time
if self.worker_refresh_interval < last_refresh_diff:
num_new_workers = self.worker_refresh_batch_size
self.log.debug(
"[%d / %d] Starting doing a refresh. Starting %d workers.",
num_ready_workers_running,
num_workers_running,
num_new_workers,
)
self._spawn_new_workers(num_new_workers)
self._last_refresh_time = time.monotonic()
return
# if we should check the directory with the plugin,
if self.reload_on_plugin_change:
# compare the previous and current contents of the directory
new_state = self._generate_plugin_state()
# If changed, wait until its content is fully saved.
if new_state != self._last_plugin_state:
self.log.debug(
"[%d / %d] Plugins folder changed. The gunicorn will be restarted the next time the "
"plugin directory is checked, if there is no change in it.",
num_ready_workers_running,
num_workers_running,
)
self._restart_on_next_plugin_check = True
self._last_plugin_state = new_state
elif self._restart_on_next_plugin_check:
self.log.debug(
"[%d / %d] Starts reloading the gunicorn configuration.",
num_ready_workers_running,
num_workers_running,
)
self._restart_on_next_plugin_check = False
self._last_refresh_time = time.monotonic()
self._reload_gunicorn()
@cli_utils.action_cli
def webserver(args):
"""Starts Airflow Webserver."""
print(settings.HEADER)
# Check for old/insecure config, and fail safe (i.e. don't launch) if the config is wildly insecure.
if conf.get("webserver", "secret_key") == "temporary_key":
from rich import print as rich_print
rich_print(
"[red][bold]ERROR:[/bold] The `secret_key` setting under the webserver config has an insecure "
"value - Airflow has failed safe and refuses to start. Please change this value to a new, "
"per-environment, randomly generated string, for example using this command `[cyan]openssl rand "
"-hex 30[/cyan]`",
file=sys.stderr,
)
sys.exit(1)
access_logfile = args.access_logfile or conf.get("webserver", "access_logfile")
error_logfile = args.error_logfile or conf.get("webserver", "error_logfile")
access_logformat = args.access_logformat or conf.get("webserver", "access_logformat")
num_workers = args.workers or conf.get("webserver", "workers")
worker_timeout = args.worker_timeout or conf.get("webserver", "web_server_worker_timeout")
ssl_cert = args.ssl_cert or conf.get("webserver", "web_server_ssl_cert")
ssl_key = args.ssl_key or conf.get("webserver", "web_server_ssl_key")
if not ssl_cert and ssl_key:
raise AirflowException("An SSL certificate must also be provided for use with " + ssl_key)
if ssl_cert and not ssl_key:
raise AirflowException("An SSL key must also be provided for use with " + ssl_cert)
from airflow.www.app import create_app
if args.debug:
print(f"Starting the web server on port {args.port} and host {args.hostname}.")
app = create_app(testing=conf.getboolean("core", "unit_test_mode"))
app.run(
debug=True,
use_reloader=not app.config["TESTING"],
port=args.port,
host=args.hostname,
ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
)
else:
pid_file, stdout, stderr, log_file = setup_locations(
"webserver", args.pid, args.stdout, args.stderr, args.log_file
)
        # Check if webserver is already running; if not, remove old pidfile
check_if_pidfile_process_is_running(pid_file=pid_file, process_name="webserver")
print(
textwrap.dedent(
f"""\
Running the Gunicorn Server with:
Workers: {num_workers} {args.workerclass}
Host: {args.hostname}:{args.port}
Timeout: {worker_timeout}
Logfiles: {access_logfile} {error_logfile}
Access Logformat: {access_logformat}
================================================================="""
)
)
run_args = [
sys.executable,
"-m",
"gunicorn",
"--workers",
str(num_workers),
"--worker-class",
str(args.workerclass),
"--timeout",
str(worker_timeout),
"--bind",
args.hostname + ":" + str(args.port),
"--name",
"airflow-webserver",
"--pid",
pid_file,
"--config",
"python:airflow.www.gunicorn_config",
]
if args.access_logfile:
run_args += ["--access-logfile", str(args.access_logfile)]
if args.error_logfile:
run_args += ["--error-logfile", str(args.error_logfile)]
if args.access_logformat and args.access_logformat.strip():
run_args += ["--access-logformat", str(args.access_logformat)]
if args.daemon:
run_args += ["--daemon"]
if ssl_cert:
run_args += ["--certfile", ssl_cert, "--keyfile", ssl_key]
run_args += ["airflow.www.app:cached_app()"]
# To prevent different workers creating the web app and
# all writing to the database at the same time, we use the --preload option.
# With the preload option, the app is loaded before the workers are forked, and each worker will
# then have a copy of the app
run_args += ["--preload"]
gunicorn_master_proc: psutil.Process | subprocess.Popen
def kill_proc(signum: int, frame: types.FrameType | None) -> NoReturn:
log.info("Received signal: %s. Closing gunicorn.", signum)
gunicorn_master_proc.terminate()
with suppress(TimeoutError):
gunicorn_master_proc.wait(timeout=30)
if isinstance(gunicorn_master_proc, subprocess.Popen):
still_running = gunicorn_master_proc.poll() is not None
else:
still_running = gunicorn_master_proc.is_running()
if still_running:
gunicorn_master_proc.kill()
sys.exit(0)
def monitor_gunicorn(gunicorn_master_pid: int) -> NoReturn:
# Register signal handlers
signal.signal(signal.SIGINT, kill_proc)
signal.signal(signal.SIGTERM, kill_proc)
# These run forever until SIG{INT, TERM, KILL, ...} signal is sent
GunicornMonitor(
gunicorn_master_pid=gunicorn_master_pid,
num_workers_expected=num_workers,
master_timeout=conf.getint("webserver", "web_server_master_timeout"),
worker_refresh_interval=conf.getint("webserver", "worker_refresh_interval", fallback=30),
worker_refresh_batch_size=conf.getint("webserver", "worker_refresh_batch_size", fallback=1),
reload_on_plugin_change=conf.getboolean(
"webserver", "reload_on_plugin_change", fallback=False
),
).start()
if args.daemon:
            # This ensures possible errors get reported before daemonization
os.environ["SKIP_DAGS_PARSING"] = "True"
app = create_app(None)
os.environ.pop("SKIP_DAGS_PARSING")
handle = setup_logging(log_file)
base, ext = os.path.splitext(pid_file)
with open(stdout, "a") as stdout, open(stderr, "a") as stderr:
stdout.truncate(0)
stderr.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
subprocess.Popen(run_args, close_fds=True)
                # Read the pid of the gunicorn master as it will be different from
                # the one of the process spawned above.
while True:
sleep(0.1)
gunicorn_master_proc_pid = read_pid_from_pidfile(pid_file)
if gunicorn_master_proc_pid:
break
# Run Gunicorn monitor
gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
monitor_gunicorn(gunicorn_master_proc.pid)
else:
with subprocess.Popen(run_args, close_fds=True) as gunicorn_master_proc:
monitor_gunicorn(gunicorn_master_proc.pid)
| 20,904 | 40.893788 | 109 | py |
airflow | airflow-main/airflow/cli/commands/db_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Database sub-commands."""
from __future__ import annotations
import logging
import os
import textwrap
from tempfile import NamedTemporaryFile
from packaging.version import parse as parse_version
from tenacity import RetryCallState, Retrying, stop_after_attempt, wait_fixed
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.utils import cli as cli_utils, db
from airflow.utils.db import REVISION_HEADS_MAP
from airflow.utils.db_cleanup import config_dict, drop_archived_tables, export_archived_records, run_cleanup
from airflow.utils.process_utils import execute_interactive
log = logging.getLogger(__name__)
def initdb(args):
"""Initializes the metadata database."""
print("DB: " + repr(settings.engine.url))
db.initdb()
print("Initialization done")
def resetdb(args):
"""Resets the metadata database."""
print("DB: " + repr(settings.engine.url))
if not (args.yes or input("This will drop existing tables if they exist. Proceed? (y/n)").upper() == "Y"):
raise SystemExit("Cancelled")
db.resetdb(skip_init=args.skip_init)
@cli_utils.action_cli(check_db=False)
def upgradedb(args):
"""Upgrades the metadata database."""
print("DB: " + repr(settings.engine.url))
if args.to_revision and args.to_version:
raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.")
if args.from_version and args.from_revision:
raise SystemExit("Cannot supply both `--from-revision` and `--from-version`")
if (args.from_revision or args.from_version) and not args.show_sql_only:
raise SystemExit(
"Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`"
)
to_revision = None
from_revision = None
if args.from_revision:
from_revision = args.from_revision
elif args.from_version:
if parse_version(args.from_version) < parse_version("2.0.0"):
raise SystemExit("--from-version must be greater or equal to than 2.0.0")
from_revision = REVISION_HEADS_MAP.get(args.from_version)
if not from_revision:
raise SystemExit(f"Unknown version {args.from_version!r} supplied as `--from-version`.")
if args.to_version:
to_revision = REVISION_HEADS_MAP.get(args.to_version)
if not to_revision:
raise SystemExit(f"Upgrading to version {args.to_version} is not supported.")
elif args.to_revision:
to_revision = args.to_revision
if not args.show_sql_only:
print("Performing upgrade with database " + repr(settings.engine.url))
else:
print("Generating sql for upgrade -- upgrade commands will *not* be submitted.")
db.upgradedb(
to_revision=to_revision,
from_revision=from_revision,
show_sql_only=args.show_sql_only,
reserialize_dags=args.reserialize_dags,
)
if not args.show_sql_only:
print("Upgrades done")
@cli_utils.action_cli(check_db=False)
def downgrade(args):
"""Downgrades the metadata database."""
if args.to_revision and args.to_version:
raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.")
if args.from_version and args.from_revision:
raise SystemExit("`--from-revision` may not be combined with `--from-version`")
if (args.from_revision or args.from_version) and not args.show_sql_only:
raise SystemExit(
"Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`"
)
if not (args.to_version or args.to_revision):
raise SystemExit("Must provide either --to-revision or --to-version.")
from_revision = None
if args.from_revision:
from_revision = args.from_revision
elif args.from_version:
from_revision = REVISION_HEADS_MAP.get(args.from_version)
if not from_revision:
raise SystemExit(f"Unknown version {args.from_version!r} supplied as `--from-version`.")
if args.to_version:
to_revision = REVISION_HEADS_MAP.get(args.to_version)
if not to_revision:
raise SystemExit(f"Downgrading to version {args.to_version} is not supported.")
elif args.to_revision:
to_revision = args.to_revision
if not args.show_sql_only:
print("Performing downgrade with database " + repr(settings.engine.url))
else:
print("Generating sql for downgrade -- downgrade commands will *not* be submitted.")
if args.show_sql_only or (
args.yes
or input(
"\nWarning: About to reverse schema migrations for the airflow metastore. "
"Please ensure you have backed up your database before any upgrade or "
"downgrade operation. Proceed? (y/n)\n"
).upper()
== "Y"
):
db.downgrade(to_revision=to_revision, from_revision=from_revision, show_sql_only=args.show_sql_only)
if not args.show_sql_only:
print("Downgrade complete")
else:
raise SystemExit("Cancelled")
def check_migrations(args):
"""Function to wait for all airflow migrations to complete. Used for launching airflow in k8s."""
db.check_migrations(timeout=args.migration_wait_timeout)
@cli_utils.action_cli(check_db=False)
def shell(args):
"""Run a shell that allows to access metadata database."""
url = settings.engine.url
print("DB: " + repr(url))
if url.get_backend_name() == "mysql":
with NamedTemporaryFile(suffix="my.cnf") as f:
content = textwrap.dedent(
f"""
[client]
host = {url.host}
user = {url.username}
password = {url.password or ""}
port = {url.port or "3306"}
database = {url.database}
"""
).strip()
f.write(content.encode())
f.flush()
execute_interactive(["mysql", f"--defaults-extra-file={f.name}"])
elif url.get_backend_name() == "sqlite":
execute_interactive(["sqlite3", url.database])
elif url.get_backend_name() == "postgresql":
env = os.environ.copy()
env["PGHOST"] = url.host or ""
env["PGPORT"] = str(url.port or "5432")
env["PGUSER"] = url.username or ""
# PostgreSQL does not allow the use of PGPASSFILE if the current user is root.
env["PGPASSWORD"] = url.password or ""
env["PGDATABASE"] = url.database
execute_interactive(["psql"], env=env)
elif url.get_backend_name() == "mssql":
env = os.environ.copy()
env["MSSQL_CLI_SERVER"] = url.host
env["MSSQL_CLI_DATABASE"] = url.database
env["MSSQL_CLI_USER"] = url.username
env["MSSQL_CLI_PASSWORD"] = url.password
execute_interactive(["mssql-cli"], env=env)
else:
raise AirflowException(f"Unknown driver: {url.drivername}")
@cli_utils.action_cli(check_db=False)
def check(args):
"""Runs a check command that checks if db is available."""
retries: int = args.retry
retry_delay: int = args.retry_delay
def _warn_remaining_retries(retrystate: RetryCallState):
remain = retries - retrystate.attempt_number
log.warning("%d retries remain. Will retry in %d seconds", remain, retry_delay)
for attempt in Retrying(
stop=stop_after_attempt(1 + retries),
wait=wait_fixed(retry_delay),
reraise=True,
before_sleep=_warn_remaining_retries,
):
with attempt:
db.check()
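# Example (sketch; flag names inferred from args.retry / args.retry_delay):
#   airflow db check --retry 3 --retry-delay 5
# performs up to 4 connection attempts, waiting 5 seconds between failed attempts.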
# lazily imported by CLI parser for `help` command
all_tables = sorted(config_dict)
@cli_utils.action_cli(check_db=False)
def cleanup_tables(args):
"""Purges old records in metadata database."""
run_cleanup(
table_names=args.tables,
dry_run=args.dry_run,
clean_before_timestamp=args.clean_before_timestamp,
verbose=args.verbose,
confirm=not args.yes,
skip_archive=args.skip_archive,
)
@cli_utils.action_cli(check_db=False)
def export_archived(args):
"""Exports archived records from metadata database."""
export_archived_records(
export_format=args.export_format,
output_path=args.output_path,
table_names=args.tables,
drop_archives=args.drop_archives,
needs_confirm=not args.yes,
)
@cli_utils.action_cli(check_db=False)
def drop_archived(args):
"""Drops archived tables from metadata database."""
drop_archived_tables(
table_names=args.tables,
needs_confirm=not args.yes,
)
| 9,356 | 36.578313 | 110 | py |
airflow | airflow-main/airflow/cli/commands/rotate_fernet_key_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rotate Fernet key command."""
from __future__ import annotations
from sqlalchemy import select
from airflow.models import Connection, Variable
from airflow.utils import cli as cli_utils
from airflow.utils.session import create_session
@cli_utils.action_cli
def rotate_fernet_key(args):
"""Rotates all encrypted connection credentials and variables."""
with create_session() as session:
conns_query = select(Connection).where(Connection.is_encrypted | Connection.is_extra_encrypted)
for conn in session.scalars(conns_query):
conn.rotate_fernet_key()
for var in session.scalars(select(Variable).where(Variable.is_encrypted)):
var.rotate_fernet_key()
| 1,496 | 40.583333 | 103 | py |
airflow | airflow-main/airflow/cli/commands/sync_perm_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sync permission command."""
from __future__ import annotations
from airflow.utils import cli as cli_utils
@cli_utils.action_cli
def sync_perm(args):
"""Updates permissions for existing roles and DAGs."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
print("Updating actions and resources for all existing roles")
# Add missing permissions for all the Base Views _before_ syncing/creating roles
appbuilder.add_permissions(update_perms=True)
appbuilder.sm.sync_roles()
if args.include_dags:
print("Updating permission on all DAG views")
appbuilder.sm.create_dag_specific_permissions()
| 1,519 | 40.081081 | 88 | py |
airflow | airflow-main/airflow/cli/commands/user_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""User sub-commands."""
from __future__ import annotations
import functools
import getpass
import json
import os
import random
import string
from typing import Any
import re2
from marshmallow import Schema, fields, validate
from marshmallow.exceptions import ValidationError
from airflow.cli.simple_table import AirflowConsole
from airflow.utils import cli as cli_utils
from airflow.utils.cli import suppress_logs_and_warning
class UserSchema(Schema):
"""user collection item schema."""
id = fields.Int()
firstname = fields.Str(required=True)
lastname = fields.Str(required=True)
username = fields.Str(required=True)
email = fields.Email(required=True)
roles = fields.List(fields.Str, required=True, validate=validate.Length(min=1))
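# Illustrative record accepted by this schema, e.g. via "airflow users import" (sketch):
# {
#     "email": "admin@example.com",
#     "firstname": "Ada",
#     "lastname": "Lovelace",
#     "roles": ["Admin"],
#     "username": "ada"
# }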
@suppress_logs_and_warning
def users_list(args):
"""Lists users at the command line."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
users = appbuilder.sm.get_all_users()
fields = ["id", "username", "email", "first_name", "last_name", "roles"]
AirflowConsole().print_as(
data=users, output=args.output, mapper=lambda x: {f: x.__getattribute__(f) for f in fields}
)
@cli_utils.action_cli(check_db=True)
def users_create(args):
"""Creates new user in the DB."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
role = appbuilder.sm.find_role(args.role)
if not role:
valid_roles = appbuilder.sm.get_all_roles()
raise SystemExit(f"{args.role} is not a valid role. Valid roles are: {valid_roles}")
if args.use_random_password:
password = "".join(random.choice(string.printable) for _ in range(16))
elif args.password:
password = args.password
else:
password = getpass.getpass("Password:")
password_confirmation = getpass.getpass("Repeat for confirmation:")
if password != password_confirmation:
raise SystemExit("Passwords did not match")
if appbuilder.sm.find_user(args.username):
print(f"{args.username} already exist in the db")
return
user = appbuilder.sm.add_user(
args.username, args.firstname, args.lastname, args.email, role, password
)
if user:
print(f'User "{args.username}" created with role "{args.role}"')
else:
raise SystemExit("Failed to create user")
def _find_user(args):
if not args.username and not args.email:
raise SystemExit("Missing args: must supply one of --username or --email")
if args.username and args.email:
raise SystemExit("Conflicting args: must supply either --username or --email, but not both")
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
user = appbuilder.sm.find_user(username=args.username, email=args.email)
if not user:
raise SystemExit(f'User "{args.username or args.email}" does not exist')
return user
@cli_utils.action_cli
def users_delete(args):
"""Deletes user from DB."""
user = _find_user(args)
# Clear the associated user roles first.
user.roles.clear()
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
if appbuilder.sm.del_register_user(user):
print(f'User "{user.username}" deleted')
else:
raise SystemExit("Failed to delete user")
@cli_utils.action_cli
def users_manage_role(args, remove=False):
"""Deletes or appends user roles."""
user = _find_user(args)
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
role = appbuilder.sm.find_role(args.role)
if not role:
valid_roles = appbuilder.sm.get_all_roles()
raise SystemExit(f'"{args.role}" is not a valid role. Valid roles are: {valid_roles}')
if remove:
if role not in user.roles:
raise SystemExit(f'User "{user.username}" is not a member of role "{args.role}"')
user.roles = [r for r in user.roles if r != role]
appbuilder.sm.update_user(user)
print(f'User "{user.username}" removed from role "{args.role}"')
else:
if role in user.roles:
raise SystemExit(f'User "{user.username}" is already a member of role "{args.role}"')
user.roles.append(role)
appbuilder.sm.update_user(user)
print(f'User "{user.username}" added to role "{args.role}"')
def users_export(args):
"""Exports all users to the json file."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
users = appbuilder.sm.get_all_users()
fields = ["id", "username", "email", "first_name", "last_name", "roles"]
# In the User model the first and last name fields have underscores,
# but the corresponding parameters in the CLI don't
def remove_underscores(s):
return re2.sub("_", "", s)
users = [
{
remove_underscores(field): user.__getattribute__(field)
if field != "roles"
else [r.name for r in user.roles]
for field in fields
}
for user in users
]
with open(args.export, "w") as file:
file.write(json.dumps(users, sort_keys=True, indent=4))
print(f"{len(users)} users successfully exported to {file.name}")
@cli_utils.action_cli
def users_import(args):
"""Imports users from the json file."""
json_file = getattr(args, "import")
if not os.path.exists(json_file):
raise SystemExit(f"File '{json_file}' does not exist")
users_list = None
try:
with open(json_file) as file:
users_list = json.loads(file.read())
except ValueError as e:
raise SystemExit(f"File '{json_file}' is not valid JSON. Error: {e}")
users_created, users_updated = _import_users(users_list)
if users_created:
print("Created the following users:\n\t{}".format("\n\t".join(users_created)))
if users_updated:
print("Updated the following users:\n\t{}".format("\n\t".join(users_updated)))
def _import_users(users_list: list[dict[str, Any]]):
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
users_created = []
users_updated = []
try:
UserSchema(many=True).load(users_list)
except ValidationError as e:
msg = []
for row_num, failure in e.normalized_messages().items():
msg.append(f"[Item {row_num}]")
for key, value in failure.items():
msg.append(f"\t{key}: {value}")
raise SystemExit(
"Error: Input file didn't pass validation. See below:\n{}".format("\n".join(msg))
)
for user in users_list:
roles = []
for rolename in user["roles"]:
role = appbuilder.sm.find_role(rolename)
if not role:
valid_roles = appbuilder.sm.get_all_roles()
raise SystemExit(
f'Error: "{rolename}" is not a valid role. Valid roles are: {valid_roles}'
)
roles.append(role)
existing_user = appbuilder.sm.find_user(email=user["email"])
if existing_user:
print(f"Found existing user with email '{user['email']}'")
if existing_user.username != user["username"]:
raise SystemExit(
f"Error: Changing the username is not allowed - "
f"please delete and recreate the user with email {user['email']!r}"
)
existing_user.roles = roles
existing_user.first_name = user["firstname"]
existing_user.last_name = user["lastname"]
appbuilder.sm.update_user(existing_user)
users_updated.append(user["email"])
else:
print(f"Creating new user with email '{user['email']}'")
appbuilder.sm.add_user(
username=user["username"],
first_name=user["firstname"],
last_name=user["lastname"],
email=user["email"],
role=roles,
)
users_created.append(user["email"])
return users_created, users_updated
add_role = functools.partial(users_manage_role, remove=False)
remove_role = functools.partial(users_manage_role, remove=True)
| 9,814 | 35.7603 | 103 | py |
airflow | airflow-main/airflow/cli/commands/config_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Config sub-commands."""
from __future__ import annotations
import io
import pygments
from pygments.lexers.configs import IniLexer
from airflow.configuration import conf
from airflow.utils.cli import should_use_colors
from airflow.utils.code_utils import get_terminal_formatter
def show_config(args):
"""Show current application configuration."""
with io.StringIO() as output:
conf.write(output, section=args.section)
code = output.getvalue()
if should_use_colors(args):
code = pygments.highlight(code=code, formatter=get_terminal_formatter(), lexer=IniLexer())
print(code)
def get_value(args):
"""Get one value from configuration."""
if not conf.has_option(args.section, args.option):
raise SystemExit(f"The option [{args.section}/{args.option}] is not found in config.")
value = conf.get(args.section, args.option)
print(value)
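# Example usage (sketch):
#   airflow config list --section core        # print one section of the configuration
#   airflow config get-value core executor    # print a single option value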
| 1,701 | 35.212766 | 102 | py |
airflow | airflow-main/airflow/cli/commands/kubernetes_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Kubernetes sub-commands."""
from __future__ import annotations
import os
import sys
from datetime import datetime, timedelta
from kubernetes import client
from kubernetes.client.api_client import ApiClient
from kubernetes.client.rest import ApiException
from airflow.executors.kubernetes_executor import KubeConfig
from airflow.kubernetes import pod_generator
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DagRun, TaskInstance
from airflow.utils import cli as cli_utils, yaml
from airflow.utils.cli import get_dag
@cli_utils.action_cli
def generate_pod_yaml(args):
"""Generates yaml files for each task in the DAG. Used for testing output of KubernetesExecutor."""
execution_date = args.execution_date
dag = get_dag(subdir=args.subdir, dag_id=args.dag_id)
yaml_output_path = args.output_path
dr = DagRun(dag.dag_id, execution_date=execution_date)
kube_config = KubeConfig()
for task in dag.tasks:
ti = TaskInstance(task, None)
ti.dag_run = dr
pod = PodGenerator.construct_pod(
dag_id=args.dag_id,
task_id=ti.task_id,
pod_id=create_pod_id(args.dag_id, ti.task_id),
try_number=ti.try_number,
kube_image=kube_config.kube_image,
date=ti.execution_date,
args=ti.command_as_list(),
pod_override_object=PodGenerator.from_obj(ti.executor_config),
scheduler_job_id="worker-config",
namespace=kube_config.executor_namespace,
base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file),
with_mutation_hook=True,
)
api_client = ApiClient()
date_string = pod_generator.datetime_to_label_safe_datestring(execution_date)
yaml_file_name = f"{args.dag_id}_{ti.task_id}_{date_string}.yml"
os.makedirs(os.path.dirname(yaml_output_path + "/airflow_yaml_output/"), exist_ok=True)
with open(yaml_output_path + "/airflow_yaml_output/" + yaml_file_name, "w") as output:
sanitized_pod = api_client.sanitize_for_serialization(pod)
output.write(yaml.dump(sanitized_pod))
print(f"YAML output can be found at {yaml_output_path}/airflow_yaml_output/")
@cli_utils.action_cli
def cleanup_pods(args):
"""Clean up k8s pods in evicted/failed/succeeded/pending states."""
namespace = args.namespace
min_pending_minutes = args.min_pending_minutes
# protect newly created pods from deletion
if min_pending_minutes < 5:
min_pending_minutes = 5
# https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/
# All Containers in the Pod have terminated in success, and will not be restarted.
pod_succeeded = "succeeded"
# The Pod has been accepted by the Kubernetes cluster,
# but one or more of the containers has not been set up and made ready to run.
pod_pending = "pending"
# All Containers in the Pod have terminated, and at least one Container has terminated in failure.
# That is, the Container either exited with non-zero status or was terminated by the system.
pod_failed = "failed"
# https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/
pod_reason_evicted = "evicted"
# If pod is failed and restartPolicy is:
# * Always: Restart Container; Pod phase stays Running.
# * OnFailure: Restart Container; Pod phase stays Running.
# * Never: Pod phase becomes Failed.
pod_restart_policy_never = "never"
print("Loading Kubernetes configuration")
kube_client = get_kube_client()
print(f"Listing pods in namespace {namespace}")
airflow_pod_labels = [
"dag_id",
"task_id",
"try_number",
"airflow_version",
]
list_kwargs = {"namespace": namespace, "limit": 500, "label_selector": ",".join(airflow_pod_labels)}
while True:
pod_list = kube_client.list_namespaced_pod(**list_kwargs)
for pod in pod_list.items:
pod_name = pod.metadata.name
print(f"Inspecting pod {pod_name}")
pod_phase = pod.status.phase.lower()
pod_reason = pod.status.reason.lower() if pod.status.reason else ""
pod_restart_policy = pod.spec.restart_policy.lower()
current_time = datetime.now(pod.metadata.creation_timestamp.tzinfo)
if (
pod_phase == pod_succeeded
or (pod_phase == pod_failed and pod_restart_policy == pod_restart_policy_never)
or (pod_reason == pod_reason_evicted)
or (
pod_phase == pod_pending
and current_time - pod.metadata.creation_timestamp
> timedelta(minutes=min_pending_minutes)
)
):
print(
f'Deleting pod "{pod_name}" phase "{pod_phase}" and reason "{pod_reason}", '
f'restart policy "{pod_restart_policy}"'
)
try:
_delete_pod(pod.metadata.name, namespace)
except ApiException as e:
print(f"Can't remove POD: {e}", file=sys.stderr)
continue
print(f"No action taken on pod {pod_name}")
continue_token = pod_list.metadata._continue
if not continue_token:
break
list_kwargs["_continue"] = continue_token
def _delete_pod(name, namespace):
"""Helper Function for cleanup_pods."""
kube_client = get_kube_client()
delete_options = client.V1DeleteOptions()
print(f'Deleting POD "{name}" from "{namespace}" namespace')
api_response = kube_client.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)
print(api_response)
| 6,709 | 41.738854 | 105 | py |
airflow | airflow-main/airflow/cli/commands/task_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task sub-commands."""
from __future__ import annotations
import importlib
import json
import logging
import os
import sys
import textwrap
from contextlib import contextmanager, redirect_stderr, redirect_stdout, suppress
from typing import Generator, Union, cast
import pendulum
from pendulum.parsing.exceptions import ParserError
from sqlalchemy import select
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.session import Session
from airflow import settings
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagRunNotFound, TaskInstanceNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.job import Job, run_job
from airflow.jobs.local_task_job_runner import LocalTaskJobRunner
from airflow.listeners.listener import get_listener_manager
from airflow.models import DagPickle, TaskInstance
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.models.operator import Operator, needs_expansion
from airflow.models.param import ParamsDict
from airflow.models.taskinstance import TaskReturnCode
from airflow.settings import IS_K8S_EXECUTOR_POD
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS
from airflow.typing_compat import Literal, Protocol
from airflow.utils import cli as cli_utils
from airflow.utils.cli import (
get_dag,
get_dag_by_file_location,
get_dag_by_pickle,
get_dags,
should_ignore_depends_on_past,
suppress_logs_and_warning,
)
from airflow.utils.dates import timezone
from airflow.utils.log.file_task_handler import _set_task_deferred_context_var
from airflow.utils.log.logging_mixin import StreamLogWriter
from airflow.utils.log.secrets_masker import RedactedIO
from airflow.utils.net import get_hostname
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.state import DagRunState
log = logging.getLogger(__name__)
CreateIfNecessary = Union[Literal[False], Literal["db"], Literal["memory"]]
def _generate_temporary_run_id() -> str:
"""Generate a ``run_id`` for a DAG run that will be created temporarily.
This is used mostly by ``airflow task test`` to create a DAG run that will
be deleted after the task is run.
"""
return f"__airflow_temporary_run_{timezone.utcnow().isoformat()}__"
def _get_dag_run(
*,
dag: DAG,
create_if_necessary: CreateIfNecessary,
exec_date_or_run_id: str | None = None,
session: Session,
) -> tuple[DagRun, bool]:
"""Try to retrieve a DAG run from a string representing either a run ID or logical date.
This checks DAG runs like this:
1. If the input ``exec_date_or_run_id`` matches a DAG run ID, return the run.
2. Try to parse the input as a date. If that works, and the resulting
date matches a DAG run's logical date, return the run.
3. If ``create_if_necessary`` is *False* and the input works for neither of
the above, raise ``DagRunNotFound``.
4. Try to create a new DAG run. If the input looks like a date, use it as
the logical date; otherwise use it as a run ID and set the logical date
to the current time.
"""
if not exec_date_or_run_id and not create_if_necessary:
raise ValueError("Must provide `exec_date_or_run_id` if not `create_if_necessary`.")
execution_date: pendulum.DateTime | None = None
if exec_date_or_run_id:
dag_run = dag.get_dagrun(run_id=exec_date_or_run_id, session=session)
if dag_run:
return dag_run, False
with suppress(ParserError, TypeError):
execution_date = timezone.parse(exec_date_or_run_id)
try:
dag_run = session.scalars(
select(DagRun).where(DagRun.dag_id == dag.dag_id, DagRun.execution_date == execution_date)
).one()
except NoResultFound:
if not create_if_necessary:
raise DagRunNotFound(
f"DagRun for {dag.dag_id} with run_id or execution_date "
f"of {exec_date_or_run_id!r} not found"
) from None
else:
return dag_run, False
if execution_date is not None:
dag_run_execution_date = execution_date
else:
dag_run_execution_date = pendulum.instance(timezone.utcnow())
if create_if_necessary == "memory":
dag_run = DagRun(
dag.dag_id,
run_id=exec_date_or_run_id,
execution_date=dag_run_execution_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=dag_run_execution_date),
)
return dag_run, True
elif create_if_necessary == "db":
dag_run = dag.create_dagrun(
state=DagRunState.QUEUED,
execution_date=dag_run_execution_date,
run_id=_generate_temporary_run_id(),
data_interval=dag.timetable.infer_manual_data_interval(run_after=dag_run_execution_date),
session=session,
)
return dag_run, True
raise ValueError(f"unknown create_if_necessary value: {create_if_necessary!r}")
@provide_session
def _get_ti(
task: Operator,
map_index: int,
*,
exec_date_or_run_id: str | None = None,
pool: str | None = None,
create_if_necessary: CreateIfNecessary = False,
session: Session = NEW_SESSION,
) -> tuple[TaskInstance, bool]:
"""Get the task instance through DagRun.run_id, if that fails, get the TI the old way."""
dag = task.dag
if dag is None:
raise ValueError("Cannot get task instance for a task not assigned to a DAG")
if not exec_date_or_run_id and not create_if_necessary:
raise ValueError("Must provide `exec_date_or_run_id` if not `create_if_necessary`.")
if needs_expansion(task):
if map_index < 0:
raise RuntimeError("No map_index passed to mapped task")
elif map_index >= 0:
raise RuntimeError("map_index passed to non-mapped task")
dag_run, dr_created = _get_dag_run(
dag=dag,
exec_date_or_run_id=exec_date_or_run_id,
create_if_necessary=create_if_necessary,
session=session,
)
ti_or_none = dag_run.get_task_instance(task.task_id, map_index=map_index, session=session)
if ti_or_none is None:
if not create_if_necessary:
raise TaskInstanceNotFound(
f"TaskInstance for {dag.dag_id}, {task.task_id}, map={map_index} with "
f"run_id or execution_date of {exec_date_or_run_id!r} not found"
)
# TODO: Validate map_index is in range?
ti = TaskInstance(task, run_id=dag_run.run_id, map_index=map_index)
ti.dag_run = dag_run
else:
ti = ti_or_none
ti.refresh_from_task(task, pool_override=pool)
return ti, dr_created
def _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None | TaskReturnCode:
"""
    Runs the task based on a mode.
    One of three modes is used:
- using LocalTaskJob
- as raw task
- by executor
"""
if args.local:
return _run_task_by_local_task_job(args, ti)
if args.raw:
return _run_raw_task(args, ti)
_run_task_by_executor(args, dag, ti)
return None
def _run_task_by_executor(args, dag: DAG, ti: TaskInstance) -> None:
"""
    Sends the task to the executor for execution.
    This can result in the task being started by another host if the executor implementation supports it.
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f"Pickled dag {dag} as pickle_id: {pickle_id}")
except Exception as e:
print("Could not pickle the DAG")
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.job_id = None
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=should_ignore_depends_on_past(args),
wait_for_past_depends_before_skipping=(args.depends_on_past == "wait"),
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
def _run_task_by_local_task_job(args, ti: TaskInstance) -> TaskReturnCode | None:
"""Run LocalTaskJob, which monitors the raw task execution process."""
job_runner = LocalTaskJobRunner(
job=Job(dag_id=ti.dag_id),
task_instance=ti,
mark_success=args.mark_success,
pickle_id=args.pickle,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=should_ignore_depends_on_past(args),
wait_for_past_depends_before_skipping=(args.depends_on_past == "wait"),
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
external_executor_id=_extract_external_executor_id(args),
)
try:
ret = run_job(job=job_runner.job, execute_callable=job_runner._execute)
finally:
if args.shut_down_logging:
logging.shutdown()
with suppress(ValueError):
return TaskReturnCode(ret)
return None
RAW_TASK_UNSUPPORTED_OPTION = [
"ignore_all_dependencies",
"ignore_depends_on_past",
"ignore_dependencies",
"force",
]
def _run_raw_task(args, ti: TaskInstance) -> None | TaskReturnCode:
"""Runs the main task handling code."""
return ti._run_raw_task(
mark_success=args.mark_success,
job_id=args.job_id,
pool=args.pool,
)
def _extract_external_executor_id(args) -> str | None:
if hasattr(args, "external_executor_id"):
return getattr(args, "external_executor_id")
return os.environ.get("external_executor_id", None)
@contextmanager
def _move_task_handlers_to_root(ti: TaskInstance) -> Generator[None, None, None]:
"""
Move handlers for task logging to root logger.
We want anything logged during task run to be propagated to task log handlers.
If running in a k8s executor pod, also keep the stream handler on root logger
so that logs are still emitted to stdout.
"""
# nothing to do
if not ti.log.handlers or settings.DONOT_MODIFY_HANDLERS:
yield
return
# Move task handlers to root and reset task logger and restore original logger settings after exit.
# If k8s executor, we need to ensure that root logger has a console handler, so that
# task logs propagate to stdout (this is how webserver retrieves them while task is running).
root_logger = logging.getLogger()
console_handler = next((h for h in root_logger.handlers if h.name == "console"), None)
with LoggerMutationHelper(root_logger), LoggerMutationHelper(ti.log) as task_helper:
task_helper.move(root_logger)
if IS_K8S_EXECUTOR_POD:
if console_handler and console_handler not in root_logger.handlers:
root_logger.addHandler(console_handler)
yield
@contextmanager
def _redirect_stdout_to_ti_log(ti: TaskInstance) -> Generator[None, None, None]:
"""
Redirect stdout to ti logger.
Redirect stdout and stderr to the task instance log as INFO and WARNING
level messages, respectively.
    If stdout is already redirected (possible when the task runs with the
    `--local` option), don't redirect again.
"""
# if sys.stdout is StreamLogWriter, it means we already redirected
# likely before forking in LocalTaskJob
if not isinstance(sys.stdout, StreamLogWriter):
info_writer = StreamLogWriter(ti.log, logging.INFO)
warning_writer = StreamLogWriter(ti.log, logging.WARNING)
with redirect_stdout(info_writer), redirect_stderr(warning_writer):
yield
else:
yield
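# A minimal illustrative sketch (not used by the CLI itself): the same redirection idea for
# an arbitrary logger. Anything printed inside the ``with`` block becomes an INFO record on
# ``logger`` instead of being written to the real stdout.
def _example_redirect_prints_to_logger(logger: logging.Logger) -> None:
    writer = StreamLogWriter(logger, logging.INFO)
    with redirect_stdout(writer):
        print("this line is captured as a log record, not terminal output")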
class TaskCommandMarker:
"""Marker for listener hooks, to properly detect from which component they are called."""
@cli_utils.action_cli(check_db=False)
def task_run(args, dag: DAG | None = None) -> TaskReturnCode | None:
"""
Run a single task instance.
Note that there must be at least one DagRun for this to start,
i.e. it must have been scheduled and/or triggered previously.
Alternatively, if you just need to run it for testing then use
"airflow tasks test ..." command instead.
"""
# Load custom airflow config
if args.local and args.raw:
raise AirflowException(
"Option --raw and --local are mutually exclusive. "
"Please remove one option to execute the command."
)
if args.raw:
unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]
if unsupported_options:
unsupported_raw_task_flags = ", ".join(f"--{o}" for o in RAW_TASK_UNSUPPORTED_OPTION)
unsupported_flags = ", ".join(f"--{o}" for o in unsupported_options)
raise AirflowException(
"Option --raw does not work with some of the other options on this command. "
"You can't use --raw option and the following options: "
f"{unsupported_raw_task_flags}. "
f"You provided the option {unsupported_flags}. "
"Delete it to execute the command."
)
if dag and args.pickle:
raise AirflowException("You cannot use the --pickle option when using DAG.cli() method.")
if args.cfg_path:
with open(args.cfg_path) as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
settings.MASK_SECRETS_IN_LOGS = True
get_listener_manager().hook.on_starting(component=TaskCommandMarker())
if args.pickle:
print(f"Loading pickle id: {args.pickle}")
_dag = get_dag_by_pickle(args.pickle)
elif not dag:
_dag = get_dag(args.subdir, args.dag_id, args.read_from_db)
else:
_dag = dag
task = _dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, pool=args.pool)
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
log.info("Running %s on host %s", ti, hostname)
# IMPORTANT, have to re-configure ORM with the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
# this should be last thing before running, to reduce likelihood of an open session
# which can cause trouble if running process in a fork.
settings.reconfigure_orm(disable_connection_pool=True)
task_return_code = None
try:
if args.interactive:
task_return_code = _run_task_by_selected_method(args, _dag, ti)
else:
with _move_task_handlers_to_root(ti), _redirect_stdout_to_ti_log(ti):
task_return_code = _run_task_by_selected_method(args, _dag, ti)
if task_return_code == TaskReturnCode.DEFERRED:
_set_task_deferred_context_var()
finally:
try:
get_listener_manager().hook.before_stopping(component=TaskCommandMarker())
except Exception:
pass
return task_return_code
@cli_utils.action_cli(check_db=False)
def task_failed_deps(args) -> None:
"""
Get task instance dependencies that were not met.
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
def task_state(args) -> None:
"""
Returns the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id)
print(ti.current_state())
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
def task_list(args, dag: DAG | None = None) -> None:
"""Lists the tasks within a DAG at the command line."""
dag = dag or get_dag(args.subdir, args.dag_id)
if args.tree:
dag.tree_view()
else:
tasks = sorted(t.task_id for t in dag.tasks)
print("\n".join(tasks))
class _SupportedDebugger(Protocol):
def post_mortem(self) -> None:
...
SUPPORTED_DEBUGGER_MODULES = [
"pudb",
"web_pdb",
"ipdb",
"pdb",
]
def _guess_debugger() -> _SupportedDebugger:
"""
    Tries to guess the debugger used by the user.
When it doesn't find any user-installed debugger, returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
exc: Exception
for mod_name in SUPPORTED_DEBUGGER_MODULES:
try:
return cast(_SupportedDebugger, importlib.import_module(mod_name))
except ImportError as e:
exc = e
raise exc
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
@provide_session
def task_states_for_dag_run(args, session: Session = NEW_SESSION) -> None:
"""Get the status of all task instances in a DagRun."""
dag_run = session.scalar(
select(DagRun).where(DagRun.run_id == args.execution_date_or_run_id, DagRun.dag_id == args.dag_id)
)
if not dag_run:
try:
execution_date = timezone.parse(args.execution_date_or_run_id)
dag_run = session.scalar(
select(DagRun).where(DagRun.execution_date == execution_date, DagRun.dag_id == args.dag_id)
)
except (ParserError, TypeError) as err:
raise AirflowException(f"Error parsing the supplied execution_date. Error: {str(err)}")
if dag_run is None:
raise DagRunNotFound(
f"DagRun for {args.dag_id} with run_id or execution_date of {args.execution_date_or_run_id!r} "
"not found"
)
has_mapped_instances = any(ti.map_index >= 0 for ti in dag_run.task_instances)
def format_task_instance(ti: TaskInstance) -> dict[str, str]:
data = {
"dag_id": ti.dag_id,
"execution_date": dag_run.execution_date.isoformat(),
"task_id": ti.task_id,
"state": ti.state,
"start_date": ti.start_date.isoformat() if ti.start_date else "",
"end_date": ti.end_date.isoformat() if ti.end_date else "",
}
if has_mapped_instances:
data["map_index"] = str(ti.map_index) if ti.map_index >= 0 else ""
return data
AirflowConsole().print_as(data=dag_run.task_instances, output=args.output, mapper=format_task_instance)
@cli_utils.action_cli(check_db=False)
def task_test(args, dag: DAG | None = None) -> None:
"""Tests task for a given dag_id."""
# We want to log output from operators etc to show up here. Normally
# airflow.task would redirect to a file, but here we want it to propagate
# up to the normal airflow handler.
settings.MASK_SECRETS_IN_LOGS = True
handlers = logging.getLogger("airflow.task").handlers
already_has_stream_handler = False
for handler in handlers:
already_has_stream_handler = isinstance(handler, logging.StreamHandler)
if already_has_stream_handler:
break
if not already_has_stream_handler:
logging.getLogger("airflow.task").propagate = True
env_vars = {"AIRFLOW_TEST_MODE": "True"}
if args.env_vars:
env_vars.update(args.env_vars)
os.environ.update(env_vars)
dag = dag or get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
task.params.update(passed_in_params)
if task.params and isinstance(task.params, ParamsDict):
task.params.validate()
ti, dr_created = _get_ti(
task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, create_if_necessary="db"
)
try:
with redirect_stdout(RedactedIO()):
if args.dry_run:
ti.dry_run()
else:
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
except Exception:
if args.post_mortem:
debugger = _guess_debugger()
debugger.post_mortem()
else:
raise
finally:
if not already_has_stream_handler:
# Make sure to reset back to normal. When run for CLI this doesn't
# matter, but it does for test suite
logging.getLogger("airflow.task").propagate = False
if dr_created:
with create_session() as session:
session.delete(ti.dag_run)
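# Illustrative invocations (dag/task ids and params are examples; --task-params must be valid JSON):
#
#   airflow tasks test example_dag example_task 2023-01-01
#   airflow tasks test example_dag example_task --task-params '{"threshold": 10}'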
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
def task_render(args, dag: DAG | None = None) -> None:
"""Renders and displays templated fields for a given task."""
if not dag:
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(
task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, create_if_necessary="memory"
)
ti.render_templates()
for attr in task.template_fields:
print(
textwrap.dedent(
f""" # ----------------------------------------------------------
# property: {attr}
# ----------------------------------------------------------
{getattr(ti.task, attr)}
"""
)
)
@cli_utils.action_cli(check_db=False)
def task_clear(args) -> None:
"""Clears all task instances or only those matched by regex for a DAG(s)."""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:
dags = [get_dag_by_file_location(args.dag_id)]
else:
# todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.partial_subset(
task_ids_or_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.yes,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
)
class LoggerMutationHelper:
"""
Helper for moving and resetting handlers and other logger attrs.
:meta private:
"""
def __init__(self, logger: logging.Logger) -> None:
self.handlers = logger.handlers[:]
self.level = logger.level
self.propagate = logger.propagate
self.source_logger = logger
def apply(self, logger: logging.Logger, replace: bool = True) -> None:
"""
Set ``logger`` with attrs stored on instance.
If ``logger`` is root logger, don't change propagate.
"""
if replace:
logger.handlers[:] = self.handlers
else:
for h in self.handlers:
if h not in logger.handlers:
logger.addHandler(h)
logger.level = self.level
if logger is not logging.getLogger():
logger.propagate = self.propagate
def move(self, logger: logging.Logger, replace: bool = True) -> None:
"""
Replace ``logger`` attrs with those from source.
:param logger: target logger
:param replace: if True, remove all handlers from target first; otherwise add if not present.
"""
self.apply(logger, replace=replace)
self.source_logger.propagate = True
self.source_logger.handlers[:] = []
def reset(self) -> None:
self.apply(self.source_logger)
def __enter__(self) -> LoggerMutationHelper:
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.reset()
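# Illustrative usage sketch (not executed by the CLI): temporarily move a logger's handlers
# onto the root logger and restore everything on exit, mirroring what
# ``_move_task_handlers_to_root`` does for task loggers above.
def _example_logger_mutation(task_logger: logging.Logger) -> None:
    root_logger = logging.getLogger()
    with LoggerMutationHelper(root_logger), LoggerMutationHelper(task_logger) as helper:
        helper.move(root_logger)
        root_logger.info("handlers of task_logger are now attached to the root logger")
    # on exit, both loggers get their original handlers, level and propagate flag back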
| 26,934 | 35.696185 | 109 | py |
airflow | airflow-main/airflow/cli/commands/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/cli/commands/role_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Roles sub-commands."""
from __future__ import annotations
import collections
import itertools
import json
import os
from airflow.cli.simple_table import AirflowConsole
from airflow.utils import cli as cli_utils
from airflow.utils.cli import suppress_logs_and_warning
from airflow.www.fab_security.sqla.models import Action, Permission, Resource, Role
from airflow.www.security import EXISTING_ROLES
@suppress_logs_and_warning
def roles_list(args):
"""Lists all existing roles."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
roles = appbuilder.sm.get_all_roles()
if not args.permission:
AirflowConsole().print_as(
data=sorted(r.name for r in roles), output=args.output, mapper=lambda x: {"name": x}
)
return
permission_map: dict[tuple[str, str], list[str]] = collections.defaultdict(list)
for role in roles:
for permission in role.permissions:
permission_map[(role.name, permission.resource.name)].append(permission.action.name)
AirflowConsole().print_as(
data=sorted(permission_map),
output=args.output,
mapper=lambda x: {"name": x[0], "resource": x[1], "action": ",".join(sorted(permission_map[x]))},
)
@cli_utils.action_cli
@suppress_logs_and_warning
def roles_create(args):
"""Creates new empty role in DB."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
for role_name in args.role:
appbuilder.sm.add_role(role_name)
print(f"Added {len(args.role)} role(s)")
@cli_utils.action_cli
@suppress_logs_and_warning
def roles_delete(args):
"""Deletes role in DB."""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
for role_name in args.role:
role = appbuilder.sm.find_role(role_name)
if not role:
print(f"Role named '{role_name}' does not exist")
exit(1)
for role_name in args.role:
appbuilder.sm.delete_role(role_name)
print(f"Deleted {len(args.role)} role(s)")
def __roles_add_or_remove_permissions(args):
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
is_add: bool = args.subcommand.startswith("add")
role_map = {}
perm_map: dict[tuple[str, str], set[str]] = collections.defaultdict(set)
asm = appbuilder.sm
for name in args.role:
role: Role | None = asm.find_role(name)
if not role:
print(f"Role named '{name}' does not exist")
exit(1)
role_map[name] = role
for permission in role.permissions:
perm_map[(name, permission.resource.name)].add(permission.action.name)
for name in args.resource:
resource: Resource | None = asm.get_resource(name)
if not resource:
print(f"Resource named '{name}' does not exist")
exit(1)
for name in args.action or []:
action: Action | None = asm.get_action(name)
if not action:
print(f"Action named '{name}' does not exist")
exit(1)
permission_count = 0
for (role_name, resource_name, action_name) in list(
itertools.product(args.role, args.resource, args.action or [None])
):
res_key = (role_name, resource_name)
if is_add and action_name not in perm_map[res_key]:
perm: Permission | None = asm.create_permission(action_name, resource_name)
asm.add_permission_to_role(role_map[role_name], perm)
print(f"Added {perm} to role {role_name}")
permission_count += 1
elif not is_add and res_key in perm_map:
for _action_name in perm_map[res_key] if action_name is None else [action_name]:
perm: Permission | None = asm.get_permission(_action_name, resource_name)
asm.remove_permission_from_role(role_map[role_name], perm)
print(f"Deleted {perm} from role {role_name}")
permission_count += 1
print(f"{'Added' if is_add else 'Deleted'} {permission_count} permission(s)")
@cli_utils.action_cli
@suppress_logs_and_warning
def roles_add_perms(args):
"""Adds permissions to role in DB."""
__roles_add_or_remove_permissions(args)
@cli_utils.action_cli
@suppress_logs_and_warning
def roles_del_perms(args):
"""Deletes permissions from role in DB."""
__roles_add_or_remove_permissions(args)
@suppress_logs_and_warning
def roles_export(args):
"""
Exports all the roles from the database to a file.
    Note, this function does not export the permissions associated with each role.
    It only exports the role names into the passed JSON file.
"""
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
roles = appbuilder.sm.get_all_roles()
exporting_roles = [role.name for role in roles if role.name not in EXISTING_ROLES]
filename = os.path.expanduser(args.file)
kwargs = {} if not args.pretty else {"sort_keys": True, "indent": 4}
with open(filename, "w", encoding="utf-8") as f:
json.dump(exporting_roles, f, **kwargs)
print(f"{len(exporting_roles)} roles successfully exported to {filename}")
@cli_utils.action_cli
@suppress_logs_and_warning
def roles_import(args):
"""
    Import all the roles into the database from the given JSON file.
    Note, this function does not import the permissions associated with the roles.
    It only imports the role names listed in the passed JSON file.
"""
json_file = args.file
try:
with open(json_file) as f:
role_list = json.load(f)
except FileNotFoundError:
print(f"File '{json_file}' does not exist")
exit(1)
except ValueError as e:
print(f"File '{json_file}' is not a valid JSON file. Error: {e}")
exit(1)
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
existing_roles = [role.name for role in appbuilder.sm.get_all_roles()]
roles_to_import = [role for role in role_list if role not in existing_roles]
for role_name in roles_to_import:
appbuilder.sm.add_role(role_name)
print(f"roles '{roles_to_import}' successfully imported")
| 7,488 | 36.445 | 105 | py |
airflow | airflow-main/airflow/cli/commands/celery_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Celery command."""
from __future__ import annotations
import logging
import sys
from contextlib import contextmanager
from multiprocessing import Process
import daemon
import psutil
import sqlalchemy.exc
from celery import maybe_patch_concurrency # type: ignore[attr-defined]
from celery.app.defaults import DEFAULT_TASK_LOG_FMT
from celery.signals import after_setup_logger
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile, remove_existing_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.providers.celery.executors.celery_executor import app as celery_app
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.serve_logs import serve_logs
WORKER_PROCESS_NAME = "worker"
@cli_utils.action_cli
def flower(args):
"""Starts Flower, Celery monitoring tool."""
options = [
"flower",
conf.get("celery", "BROKER_URL"),
f"--address={args.hostname}",
f"--port={args.port}",
]
if args.broker_api:
options.append(f"--broker-api={args.broker_api}")
if args.url_prefix:
options.append(f"--url-prefix={args.url_prefix}")
if args.basic_auth:
options.append(f"--basic-auth={args.basic_auth}")
if args.flower_conf:
options.append(f"--conf={args.flower_conf}")
if args.daemon:
pidfile, stdout, stderr, _ = setup_locations(
process="flower",
pid=args.pid,
stdout=args.stdout,
stderr=args.stderr,
log=args.log_file,
)
with open(stdout, "a") as stdout, open(stderr, "a") as stderr:
stdout.truncate(0)
stderr.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pidfile, -1),
stdout=stdout,
stderr=stderr,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
celery_app.start(options)
else:
celery_app.start(options)
@contextmanager
def _serve_logs(skip_serve_logs: bool = False):
"""Starts serve_logs sub-process."""
sub_proc = None
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
yield
if sub_proc:
sub_proc.terminate()
@after_setup_logger.connect()
def logger_setup_handler(logger, **kwargs):
"""
Reconfigure the logger.
* remove any previously configured handlers
    * logs of severity ERROR and above go to stderr,
    * logs of severity lower than ERROR go to stdout.
"""
if conf.getboolean("logging", "celery_stdout_stderr_separation", fallback=False):
celery_formatter = logging.Formatter(DEFAULT_TASK_LOG_FMT)
class NoErrorOrAboveFilter(logging.Filter):
"""Allow only logs with level *lower* than ERROR to be reported."""
def filter(self, record):
return record.levelno < logging.ERROR
below_error_handler = logging.StreamHandler(sys.stdout)
below_error_handler.addFilter(NoErrorOrAboveFilter())
below_error_handler.setFormatter(celery_formatter)
from_error_handler = logging.StreamHandler(sys.stderr)
from_error_handler.setLevel(logging.ERROR)
from_error_handler.setFormatter(celery_formatter)
logger.handlers[:] = [below_error_handler, from_error_handler]
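# Illustrative configuration sketch: the stdout/stderr split above only takes effect when the
# option is enabled, e.g. in airflow.cfg
#
#   [logging]
#   celery_stdout_stderr_separation = True
#
# or via the environment variable AIRFLOW__LOGGING__CELERY_STDOUT_STDERR_SEPARATION=True.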
@cli_utils.action_cli
def worker(args):
"""Starts Airflow Celery worker."""
# Disable connection pool so that celery worker does not hold an unnecessary db connection
settings.reconfigure_orm(disable_connection_pool=True)
if not settings.validate_session():
raise SystemExit("Worker exiting, database connection precheck failed.")
autoscale = args.autoscale
skip_serve_logs = args.skip_serve_logs
if autoscale is None and conf.has_option("celery", "worker_autoscale"):
autoscale = conf.get("celery", "worker_autoscale")
# Setup locations
pid_file_path, stdout, stderr, log_file = setup_locations(
process=WORKER_PROCESS_NAME,
pid=args.pid,
stdout=args.stdout,
stderr=args.stderr,
log=args.log_file,
)
if hasattr(celery_app.backend, "ResultSession"):
# Pre-create the database tables now, otherwise SQLA via Celery has a
# race condition where one of the subprocesses can die with "Table
# already exists" error, because SQLA checks for which tables exist,
# then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
# EXISTS
try:
session = celery_app.backend.ResultSession()
session.close()
except sqlalchemy.exc.IntegrityError:
# At least on postgres, trying to create a table that already exist
# gives a unique constraint violation or the
# "pg_type_typname_nsp_index" table. If this happens we can ignore
# it, we raced to create the tables and lost.
pass
# backwards-compatible: https://github.com/apache/airflow/pull/21506#pullrequestreview-879893763
celery_log_level = conf.get("logging", "CELERY_LOGGING_LEVEL")
if not celery_log_level:
celery_log_level = conf.get("logging", "LOGGING_LEVEL")
# Setup Celery worker
options = [
"worker",
"-O",
"fair",
"--queues",
args.queues,
"--concurrency",
args.concurrency,
"--hostname",
args.celery_hostname,
"--loglevel",
celery_log_level,
"--pidfile",
pid_file_path,
]
if autoscale:
options.extend(["--autoscale", autoscale])
if args.without_mingle:
options.append("--without-mingle")
if args.without_gossip:
options.append("--without-gossip")
if conf.has_option("celery", "pool"):
pool = conf.get("celery", "pool")
options.extend(["--pool", pool])
# Celery pools of type eventlet and gevent use greenlets, which
# requires monkey patching the app:
# https://eventlet.net/doc/patching.html#monkey-patch
# Otherwise task instances hang on the workers and are never
# executed.
maybe_patch_concurrency(["-P", pool])
if args.daemon:
# Run Celery worker as daemon
handle = setup_logging(log_file)
with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
if args.umask:
umask = args.umask
else:
umask = conf.get("celery", "worker_umask", fallback=settings.DAEMON_UMASK)
stdout_handle.truncate(0)
stderr_handle.truncate(0)
daemon_context = daemon.DaemonContext(
files_preserve=[handle],
umask=int(umask, 8),
stdout=stdout_handle,
stderr=stderr_handle,
)
with daemon_context, _serve_logs(skip_serve_logs):
celery_app.worker_main(options)
else:
# Run Celery worker in the same process
with _serve_logs(skip_serve_logs):
celery_app.worker_main(options)
@cli_utils.action_cli
def stop_worker(args):
"""Sends SIGTERM to Celery worker."""
# Read PID from file
if args.pid:
pid_file_path = args.pid
else:
pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME)
pid = read_pid_from_pidfile(pid_file_path)
# Send SIGTERM
if pid:
worker_process = psutil.Process(pid)
worker_process.terminate()
# Remove pid file
remove_existing_pidfile(pid_file_path)
| 8,526 | 32.703557 | 100 | py |
airflow | airflow-main/airflow/cli/commands/dag_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dag sub-commands."""
from __future__ import annotations
import ast
import errno
import json
import logging
import operator
import signal
import subprocess
import sys
import warnings
from graphviz.dot import Dot
from sqlalchemy import delete, select
from sqlalchemy.orm import Session
from airflow import settings
from airflow.api.client import get_current_api_client
from airflow.api_connexion.schemas.dag_schema import dag_schema
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.jobs.job import Job
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
from airflow.timetables.base import DataInterval
from airflow.utils import cli as cli_utils, timezone
from airflow.utils.cli import get_dag, get_dags, process_subdir, sigint_handler, suppress_logs_and_warning
from airflow.utils.dot_renderer import render_dag, render_dag_dependencies
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.state import DagRunState
log = logging.getLogger(__name__)
def _run_dag_backfill(dags: list[DAG], args) -> None:
# If only one date is passed, using same as start and end
args.end_date = args.end_date or args.start_date
args.start_date = args.start_date or args.end_date
run_conf = None
if args.conf:
run_conf = json.loads(args.conf)
for dag in dags:
if args.task_regex:
dag = dag.partial_subset(
task_ids_or_regex=args.task_regex, include_upstream=not args.ignore_dependencies
)
if not dag.task_dict:
raise AirflowException(
f"There are no tasks that match '{args.task_regex}' regex. Nothing to run, exiting..."
)
if args.dry_run:
print(f"Dry run of DAG {dag.dag_id} on {args.start_date}")
dagrun_infos = dag.iter_dagrun_infos_between(earliest=args.start_date, latest=args.end_date)
for dagrun_info in dagrun_infos:
dr = DagRun(
dag.dag_id,
execution_date=dagrun_info.logical_date,
data_interval=dagrun_info.data_interval,
)
for task in dag.tasks:
print(f"Task {task.task_id} located in DAG {dag.dag_id}")
ti = TaskInstance(task, run_id=None)
ti.dag_run = dr
ti.dry_run()
else:
if args.reset_dagruns:
DAG.clear_dags(
[dag],
start_date=args.start_date,
end_date=args.end_date,
confirm_prompt=not args.yes,
include_subdags=True,
dag_run_state=DagRunState.QUEUED,
)
try:
dag.run(
start_date=args.start_date,
end_date=args.end_date,
mark_success=args.mark_success,
local=args.local,
donot_pickle=(args.donot_pickle or conf.getboolean("core", "donot_pickle")),
ignore_first_depends_on_past=args.ignore_first_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
pool=args.pool,
delay_on_limit_secs=args.delay_on_limit,
verbose=args.verbose,
conf=run_conf,
rerun_failed_tasks=args.rerun_failed_tasks,
run_backwards=args.run_backwards,
continue_on_failures=args.continue_on_failures,
disable_retry=args.disable_retry,
)
except ValueError as vr:
print(str(vr))
sys.exit(1)
@cli_utils.action_cli
def dag_backfill(args, dag: list[DAG] | DAG | None = None) -> None:
"""Creates backfill job or dry run for a DAG or list of DAGs using regex."""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
signal.signal(signal.SIGTERM, sigint_handler)
warnings.warn(
"--ignore-first-depends-on-past is deprecated as the value is always set to True",
category=RemovedInAirflow3Warning,
)
if args.ignore_first_depends_on_past is False:
args.ignore_first_depends_on_past = True
if not args.start_date and not args.end_date:
raise AirflowException("Provide a start_date and/or end_date")
if not dag:
dags = get_dags(args.subdir, dag_id=args.dag_id, use_regex=args.treat_dag_as_regex)
elif isinstance(dag, list):
dags = dag
else:
dags = [dag]
del dag
dags.sort(key=lambda d: d.dag_id)
_run_dag_backfill(dags, args)
if len(dags) > 1:
log.info("All of the backfills are done.")
@cli_utils.action_cli
def dag_trigger(args) -> None:
"""Creates a dag run for the specified dag."""
api_client = get_current_api_client()
try:
message = api_client.trigger_dag(
dag_id=args.dag_id,
run_id=args.run_id,
conf=args.conf,
execution_date=args.exec_date,
replace_microseconds=args.replace_microseconds,
)
AirflowConsole().print_as(
data=[message] if message is not None else [],
output=args.output,
)
except OSError as err:
raise AirflowException(err)
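# Illustrative invocations (dag id, run id and conf values are examples; conf must be valid JSON):
#
#   airflow dags trigger example_dag
#   airflow dags trigger example_dag --run-id manual_run_1 --conf '{"param": 1}'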
@cli_utils.action_cli
def dag_delete(args) -> None:
"""Deletes all DB records related to the specified dag."""
api_client = get_current_api_client()
if (
args.yes
or input("This will drop all existing records related to the specified DAG. Proceed? (y/n)").upper()
== "Y"
):
try:
message = api_client.delete_dag(dag_id=args.dag_id)
print(message)
except OSError as err:
raise AirflowException(err)
else:
print("Cancelled")
@cli_utils.action_cli
def dag_pause(args) -> None:
"""Pauses a DAG."""
set_is_paused(True, args)
@cli_utils.action_cli
def dag_unpause(args) -> None:
"""Unpauses a DAG."""
set_is_paused(False, args)
def set_is_paused(is_paused: bool, args) -> None:
"""Sets is_paused for DAG by a given dag_id."""
dag = DagModel.get_dagmodel(args.dag_id)
if not dag:
raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
dag.set_is_paused(is_paused=is_paused)
print(f"Dag: {args.dag_id}, paused: {is_paused}")
def dag_dependencies_show(args) -> None:
"""Displays DAG dependencies, save to file or show as imgcat image."""
dot = render_dag_dependencies(SerializedDagModel.get_dag_dependencies())
filename = args.save
imgcat = args.imgcat
if filename and imgcat:
raise SystemExit(
"Option --save and --imgcat are mutually exclusive. "
"Please remove one option to execute the command.",
)
elif filename:
_save_dot_to_file(dot, filename)
elif imgcat:
_display_dot_via_imgcat(dot)
else:
print(dot.source)
def dag_show(args) -> None:
"""Displays DAG or saves it's graphic representation to the file."""
dag = get_dag(args.subdir, args.dag_id)
dot = render_dag(dag)
filename = args.save
imgcat = args.imgcat
if filename and imgcat:
raise SystemExit(
"Option --save and --imgcat are mutually exclusive. "
"Please remove one option to execute the command.",
)
elif filename:
_save_dot_to_file(dot, filename)
elif imgcat:
_display_dot_via_imgcat(dot)
else:
print(dot.source)
def _display_dot_via_imgcat(dot: Dot) -> None:
data = dot.pipe(format="png")
try:
with subprocess.Popen("imgcat", stdout=subprocess.PIPE, stdin=subprocess.PIPE) as proc:
out, err = proc.communicate(data)
if out:
print(out.decode("utf-8"))
if err:
print(err.decode("utf-8"))
except OSError as e:
if e.errno == errno.ENOENT:
raise SystemExit("Failed to execute. Make sure the imgcat executables are on your systems 'PATH'")
else:
raise
def _save_dot_to_file(dot: Dot, filename: str) -> None:
filename_without_ext, _, ext = filename.rpartition(".")
dot.render(filename=filename_without_ext, format=ext, cleanup=True)
print(f"File {filename} saved")
@cli_utils.action_cli
@provide_session
def dag_state(args, session: Session = NEW_SESSION) -> None:
"""
    Returns the state (and conf if it exists) of a DagRun at the command line.
>>> airflow dags state tutorial 2015-01-01T00:00:00.000000
running
>>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000
failed, {"name": "bob", "age": "42"}
"""
dag = DagModel.get_dagmodel(args.dag_id, session=session)
if not dag:
raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
dr = session.scalar(select(DagRun).filter_by(dag_id=args.dag_id, execution_date=args.execution_date))
out = dr.state if dr else None
conf_out = ""
if out and dr.conf:
conf_out = ", " + json.dumps(dr.conf)
print(str(out) + conf_out)
@cli_utils.action_cli
def dag_next_execution(args) -> None:
"""
Returns the next execution datetime of a DAG at the command line.
>>> airflow dags next-execution tutorial
2018-08-31 10:38:00
"""
dag = get_dag(args.subdir, args.dag_id)
if dag.get_is_paused():
print("[INFO] Please be reminded this DAG is PAUSED now.", file=sys.stderr)
with create_session() as session:
last_parsed_dag: DagModel = session.scalars(
select(DagModel).where(DagModel.dag_id == dag.dag_id)
).one()
def print_execution_interval(interval: DataInterval | None):
if interval is None:
print(
"[WARN] No following schedule can be found. "
"This DAG may have schedule interval '@once' or `None`.",
file=sys.stderr,
)
print(None)
return
print(interval.start.isoformat())
next_interval = dag.get_next_data_interval(last_parsed_dag)
print_execution_interval(next_interval)
for _ in range(1, args.num_executions):
next_info = dag.next_dagrun_info(next_interval, restricted=False)
next_interval = None if next_info is None else next_info.data_interval
print_execution_interval(next_interval)
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_list_dags(args) -> None:
"""Displays dags with or without stats at the command line."""
dagbag = DagBag(process_subdir(args.subdir))
if dagbag.import_errors:
from rich import print as rich_print
rich_print(
"[red][bold]Error:[/bold] Failed to load all files. "
"For details, run `airflow dags list-import-errors`",
file=sys.stderr,
)
AirflowConsole().print_as(
data=sorted(dagbag.dags.values(), key=operator.attrgetter("dag_id")),
output=args.output,
mapper=lambda x: {
"dag_id": x.dag_id,
"filepath": x.filepath,
"owner": x.owner,
"paused": x.get_is_paused(),
},
)
@cli_utils.action_cli
@suppress_logs_and_warning
@provide_session
def dag_details(args, session=NEW_SESSION):
"""Get DAG details given a DAG id."""
dag = DagModel.get_dagmodel(args.dag_id, session=session)
if not dag:
raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
dag_detail = dag_schema.dump(dag)
if args.output in ["table", "plain"]:
data = [{"property_name": key, "property_value": value} for key, value in dag_detail.items()]
else:
data = [dag_detail]
AirflowConsole().print_as(
data=data,
output=args.output,
)
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_list_import_errors(args) -> None:
"""Displays dags with import errors on the command line."""
dagbag = DagBag(process_subdir(args.subdir))
data = []
for filename, errors in dagbag.import_errors.items():
data.append({"filepath": filename, "error": errors})
AirflowConsole().print_as(
data=data,
output=args.output,
)
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_report(args) -> None:
"""Displays dagbag stats at the command line."""
dagbag = DagBag(process_subdir(args.subdir))
AirflowConsole().print_as(
data=dagbag.dagbag_stats,
output=args.output,
mapper=lambda x: {
"file": x.file,
"duration": x.duration,
"dag_num": x.dag_num,
"task_num": x.task_num,
"dags": sorted(ast.literal_eval(x.dags)),
},
)
@cli_utils.action_cli
@suppress_logs_and_warning
@provide_session
def dag_list_jobs(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None:
"""Lists latest n jobs."""
queries = []
if dag:
args.dag_id = dag.dag_id
if args.dag_id:
dag = DagModel.get_dagmodel(args.dag_id, session=session)
if not dag:
raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
queries.append(Job.dag_id == args.dag_id)
if args.state:
queries.append(Job.state == args.state)
fields = ["dag_id", "state", "job_type", "start_date", "end_date"]
all_jobs_iter = session.scalars(
select(Job).where(*queries).order_by(Job.start_date.desc()).limit(args.limit)
)
all_jobs = [{f: str(job.__getattribute__(f)) for f in fields} for job in all_jobs_iter]
AirflowConsole().print_as(
data=all_jobs,
output=args.output,
)
@cli_utils.action_cli
@suppress_logs_and_warning
@provide_session
def dag_list_dag_runs(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None:
"""Lists dag runs for a given DAG."""
if dag:
args.dag_id = dag.dag_id
else:
dag = DagModel.get_dagmodel(args.dag_id, session=session)
if not dag:
raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
state = args.state.lower() if args.state else None
dag_runs = DagRun.find(
dag_id=args.dag_id,
state=state,
no_backfills=args.no_backfill,
execution_start_date=args.start_date,
execution_end_date=args.end_date,
session=session,
)
dag_runs.sort(key=lambda x: x.execution_date, reverse=True)
AirflowConsole().print_as(
data=dag_runs,
output=args.output,
mapper=lambda dr: {
"dag_id": dr.dag_id,
"run_id": dr.run_id,
"state": dr.state,
"execution_date": dr.execution_date.isoformat(),
"start_date": dr.start_date.isoformat() if dr.start_date else "",
"end_date": dr.end_date.isoformat() if dr.end_date else "",
},
)
@provide_session
@cli_utils.action_cli
def dag_test(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None:
"""Execute one single DagRun for a given DAG and execution date."""
run_conf = None
if args.conf:
try:
run_conf = json.loads(args.conf)
except ValueError as e:
raise SystemExit(f"Configuration {args.conf!r} is not valid JSON. Error: {e}")
execution_date = args.execution_date or timezone.utcnow()
dag = dag or get_dag(subdir=args.subdir, dag_id=args.dag_id)
dag.test(execution_date=execution_date, run_conf=run_conf, session=session)
show_dagrun = args.show_dagrun
imgcat = args.imgcat_dagrun
filename = args.save_dagrun
if show_dagrun or imgcat or filename:
tis = session.scalars(
select(TaskInstance).where(
TaskInstance.dag_id == args.dag_id,
TaskInstance.execution_date == execution_date,
)
).all()
dot_graph = render_dag(dag, tis=tis)
print()
if filename:
_save_dot_to_file(dot_graph, filename)
if imgcat:
_display_dot_via_imgcat(dot_graph)
if show_dagrun:
print(dot_graph.source)
@provide_session
@cli_utils.action_cli
def dag_reserialize(args, session: Session = NEW_SESSION) -> None:
"""Serialize a DAG instance."""
session.execute(delete(SerializedDagModel).execution_options(synchronize_session=False))
if not args.clear_only:
dagbag = DagBag(process_subdir(args.subdir))
dagbag.sync_to_db(session=session)
| 17,647 | 32.615238 | 110 | py |
airflow | airflow-main/airflow/cli/commands/standalone_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import os
import random
import socket
import subprocess
import threading
import time
from collections import deque
from termcolor import colored
from airflow.configuration import AIRFLOW_HOME, conf, make_group_other_inaccessible
from airflow.executors import executor_constants
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import most_recent_job
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.jobs.triggerer_job_runner import TriggererJobRunner
from airflow.utils import db
class StandaloneCommand:
"""
Runs all components of Airflow under a single parent process.
Useful for local development.
"""
@classmethod
def entrypoint(cls, args):
"""CLI entrypoint, called by the main CLI system."""
StandaloneCommand().run()
def __init__(self):
self.subcommands = {}
self.output_queue = deque()
self.user_info = {}
self.ready_time = None
self.ready_delay = 3
def run(self):
"""Main run loop."""
self.print_output("standalone", "Starting Airflow Standalone")
# Silence built-in logging at INFO
logging.getLogger("").setLevel(logging.WARNING)
# Startup checks and prep
env = self.calculate_env()
self.initialize_database()
# Set up commands to run
self.subcommands["scheduler"] = SubCommand(
self,
name="scheduler",
command=["scheduler"],
env=env,
)
self.subcommands["webserver"] = SubCommand(
self,
name="webserver",
command=["webserver"],
env=env,
)
self.subcommands["triggerer"] = SubCommand(
self,
name="triggerer",
command=["triggerer"],
env=env,
)
self.web_server_port = conf.getint("webserver", "WEB_SERVER_PORT", fallback=8080)
# Run subcommand threads
for command in self.subcommands.values():
command.start()
# Run output loop
shown_ready = False
while True:
try:
# Print all the current lines onto the screen
self.update_output()
# Print info banner when all components are ready and the
# delay has passed
if not self.ready_time and self.is_ready():
self.ready_time = time.monotonic()
if (
not shown_ready
and self.ready_time
and time.monotonic() - self.ready_time > self.ready_delay
):
self.print_ready()
shown_ready = True
# Ensure we idle-sleep rather than fast-looping
time.sleep(0.1)
except KeyboardInterrupt:
break
# Stop subcommand threads
self.print_output("standalone", "Shutting down components")
for command in self.subcommands.values():
command.stop()
for command in self.subcommands.values():
command.join()
self.print_output("standalone", "Complete")
def update_output(self):
"""Drains the output queue and prints its contents to the screen."""
while self.output_queue:
# Extract info
name, line = self.output_queue.popleft()
# Make line printable
line_str = line.decode("utf8").strip()
self.print_output(name, line_str)
def print_output(self, name: str, output):
"""
Prints an output line with name and colouring.
You can pass multiple lines to output if you wish; it will be split for you.
"""
color = {
"webserver": "green",
"scheduler": "blue",
"triggerer": "cyan",
"standalone": "white",
}.get(name, "white")
colorised_name = colored("%10s" % name, color)
for line in output.split("\n"):
print(f"{colorised_name} | {line.strip()}")
def print_error(self, name: str, output):
"""
Prints an error message to the console.
        This is the same as print_output, but with the text in red.
"""
self.print_output(name, colored(output, "red"))
def calculate_env(self):
"""
Works out the environment variables needed to run subprocesses.
We override some settings as part of being standalone.
"""
env = dict(os.environ)
# Make sure we're using a local executor flavour
executor_class, _ = ExecutorLoader.import_default_executor_cls()
if not executor_class.is_local:
if "sqlite" in conf.get("database", "sql_alchemy_conn"):
self.print_output("standalone", "Forcing executor to SequentialExecutor")
env["AIRFLOW__CORE__EXECUTOR"] = executor_constants.SEQUENTIAL_EXECUTOR
else:
self.print_output("standalone", "Forcing executor to LocalExecutor")
env["AIRFLOW__CORE__EXECUTOR"] = executor_constants.LOCAL_EXECUTOR
return env
def initialize_database(self):
"""Makes sure all the tables are created."""
# Set up DB tables
self.print_output("standalone", "Checking database is initialized")
db.initdb()
self.print_output("standalone", "Database ready")
# See if a user needs creating
# We want a streamlined first-run experience, but we do not want to
# use a preset password as people will inevitably run this on a public
# server. Thus, we make a random password and store it in AIRFLOW_HOME,
# with the reasoning that if you can read that directory, you can see
# the database credentials anyway.
from airflow.utils.cli_app_builder import get_application_builder
with get_application_builder() as appbuilder:
user_exists = appbuilder.sm.find_user("admin")
password_path = os.path.join(AIRFLOW_HOME, "standalone_admin_password.txt")
we_know_password = os.path.isfile(password_path)
# If the user does not exist, make a random password and make it
if not user_exists:
self.print_output("standalone", "Creating admin user")
role = appbuilder.sm.find_role("Admin")
assert role is not None
password = "".join(
random.choice("abcdefghkmnpqrstuvwxyzABCDEFGHKMNPQRSTUVWXYZ23456789") for i in range(16)
)
with open(password_path, "w") as file:
file.write(password)
make_group_other_inaccessible(password_path)
appbuilder.sm.add_user("admin", "Admin", "User", "[email protected]", role, password)
self.print_output("standalone", "Created admin user")
# If the user does exist and we know its password, read the password
elif user_exists and we_know_password:
with open(password_path) as file:
password = file.read().strip()
# Otherwise we don't know the password
else:
password = None
# Store what we know about the user for printing later in startup
self.user_info = {"username": "admin", "password": password}
def is_ready(self):
"""
Detects when all Airflow components are ready to serve.
        Checks that the webserver port is open and that the scheduler and triggerer jobs are heartbeating.
"""
return (
self.port_open(self.web_server_port)
and self.job_running(SchedulerJobRunner)
and self.job_running(TriggererJobRunner)
)
def port_open(self, port):
"""
Checks if the given port is listening on the local machine.
Used to tell if webserver is alive.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect(("127.0.0.1", port))
sock.close()
except (OSError, ValueError):
# Any exception means the socket is not available
return False
return True
def job_running(self, job_runner_class: type[BaseJobRunner]):
"""
Checks if the given job name is running and heartbeating correctly.
Used to tell if scheduler is alive.
"""
recent = most_recent_job(job_runner_class.job_type)
if not recent:
return False
return recent.is_alive()
def print_ready(self):
"""
Prints the banner shown when Airflow is ready to go.
        Includes the login details.
"""
self.print_output("standalone", "")
self.print_output("standalone", "Airflow is ready")
if self.user_info["password"]:
self.print_output(
"standalone",
f"Login with username: {self.user_info['username']} password: {self.user_info['password']}",
)
self.print_output(
"standalone",
"Airflow Standalone is for development purposes only. Do not use this in production!",
)
self.print_output("standalone", "")
class SubCommand(threading.Thread):
"""
Execute a subcommand on another thread.
Thread that launches a process and then streams its output back to the main
command. We use threads to avoid using select() and raw filehandles, and the
complex logic that brings doing line buffering.
"""
def __init__(self, parent, name: str, command: list[str], env: dict[str, str]):
super().__init__()
self.parent = parent
self.name = name
self.command = command
self.env = env
def run(self):
"""Runs the actual process and captures it output to a queue."""
self.process = subprocess.Popen(
["airflow"] + self.command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=self.env,
)
for line in self.process.stdout:
self.parent.output_queue.append((self.name, line))
def stop(self):
"""Call to stop this process (and thus this thread)."""
self.process.terminate()
# Alias for use in the CLI parser
standalone = StandaloneCommand.entrypoint
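# Illustrative usage: ``airflow standalone`` starts the scheduler, webserver and triggerer
# defined above under one parent process and multiplexes their output. It is intended for
# local development only, not for production deployments.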
| 11,236 | 35.722222 | 109 | py |
airflow | airflow-main/airflow/cli/commands/pool_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pools sub-commands."""
from __future__ import annotations
import json
import os
from json import JSONDecodeError
from airflow.api.client import get_current_api_client
from airflow.cli.simple_table import AirflowConsole
from airflow.exceptions import PoolNotFound
from airflow.utils import cli as cli_utils
from airflow.utils.cli import suppress_logs_and_warning
def _show_pools(pools, output):
AirflowConsole().print_as(
data=pools,
output=output,
mapper=lambda x: {
"pool": x[0],
"slots": x[1],
"description": x[2],
},
)
@suppress_logs_and_warning
def pool_list(args):
"""Displays info of all the pools."""
api_client = get_current_api_client()
pools = api_client.get_pools()
_show_pools(pools=pools, output=args.output)
@suppress_logs_and_warning
def pool_get(args):
"""Displays pool info by a given name."""
api_client = get_current_api_client()
try:
pools = [api_client.get_pool(name=args.pool)]
_show_pools(pools=pools, output=args.output)
except PoolNotFound:
raise SystemExit(f"Pool {args.pool} does not exist")
@cli_utils.action_cli
@suppress_logs_and_warning
def pool_set(args):
"""Creates new pool with a given name and slots."""
api_client = get_current_api_client()
api_client.create_pool(name=args.pool, slots=args.slots, description=args.description)
print(f"Pool {args.pool} created")
@cli_utils.action_cli
@suppress_logs_and_warning
def pool_delete(args):
"""Deletes pool by a given name."""
api_client = get_current_api_client()
try:
api_client.delete_pool(name=args.pool)
print(f"Pool {args.pool} deleted")
except PoolNotFound:
raise SystemExit(f"Pool {args.pool} does not exist")
@cli_utils.action_cli
@suppress_logs_and_warning
def pool_import(args):
"""Imports pools from the file."""
if not os.path.exists(args.file):
raise SystemExit(f"Missing pools file {args.file}")
pools, failed = pool_import_helper(args.file)
if len(failed) > 0:
raise SystemExit(f"Failed to update pool(s): {', '.join(failed)}")
print(f"Uploaded {len(pools)} pool(s)")
def pool_export(args):
"""Exports all the pools to the file."""
pools = pool_export_helper(args.file)
print(f"Exported {len(pools)} pools to {args.file}")
def pool_import_helper(filepath):
"""Helps import pools from the json file."""
api_client = get_current_api_client()
with open(filepath) as poolfile:
data = poolfile.read()
try:
pools_json = json.loads(data)
except JSONDecodeError as e:
raise SystemExit(f"Invalid json file: {e}")
pools = []
failed = []
for k, v in pools_json.items():
if isinstance(v, dict) and len(v) == 2:
pools.append(api_client.create_pool(name=k, slots=v["slots"], description=v["description"]))
else:
failed.append(k)
return pools, failed
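# Illustrative sketch (pool names and slot counts are examples only): the JSON file accepted by
# ``pool_import_helper`` maps each pool name to a dict with exactly the two keys ``slots`` and
# ``description``, matching the shape written by ``pool_export_helper``.
def _example_pools_file(path: str = "pools.json") -> None:
    example_pools = {
        "default_pool": {"slots": 128, "description": "Default pool"},
        "etl_pool": {"slots": 8, "description": "Pool reserved for ETL tasks"},
    }
    with open(path, "w") as f:
        json.dump(example_pools, f, sort_keys=True, indent=4)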
def pool_export_helper(filepath):
"""Helps export all the pools to the json file."""
api_client = get_current_api_client()
pool_dict = {}
pools = api_client.get_pools()
for pool in pools:
pool_dict[pool[0]] = {"slots": pool[1], "description": pool[2]}
with open(filepath, "w") as poolfile:
poolfile.write(json.dumps(pool_dict, sort_keys=True, indent=4))
return pools
| 4,191 | 30.757576 | 104 | py |
airflow | airflow-main/airflow/cli/commands/version_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Version command."""
from __future__ import annotations
import airflow
def version(args):
"""Displays Airflow version at the command line."""
print(airflow.__version__)
| 967 | 36.230769 | 62 | py |
airflow | airflow-main/airflow/cli/commands/variable_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Variable subcommands."""
from __future__ import annotations
import json
import os
from json import JSONDecodeError
from sqlalchemy import select
from airflow.cli.simple_table import AirflowConsole
from airflow.models import Variable
from airflow.utils import cli as cli_utils
from airflow.utils.cli import suppress_logs_and_warning
from airflow.utils.session import create_session
@suppress_logs_and_warning
def variables_list(args):
"""Displays all the variables."""
with create_session() as session:
variables = session.scalars(select(Variable)).all()
AirflowConsole().print_as(data=variables, output=args.output, mapper=lambda x: {"key": x.key})
@suppress_logs_and_warning
def variables_get(args):
"""Displays variable by a given name."""
try:
if args.default is None:
var = Variable.get(args.key, deserialize_json=args.json)
print(var)
else:
var = Variable.get(args.key, deserialize_json=args.json, default_var=args.default)
print(var)
except (ValueError, KeyError) as e:
raise SystemExit(str(e).strip("'\""))
@cli_utils.action_cli
def variables_set(args):
"""Creates new variable with a given name and value."""
Variable.set(args.key, args.value, serialize_json=args.json)
print(f"Variable {args.key} created")
@cli_utils.action_cli
def variables_delete(args):
"""Deletes variable by a given name."""
Variable.delete(args.key)
print(f"Variable {args.key} deleted")
@cli_utils.action_cli
def variables_import(args):
"""Imports variables from a given file."""
if os.path.exists(args.file):
_import_helper(args.file)
else:
raise SystemExit("Missing variables file.")
def variables_export(args):
"""Exports all the variables to the file."""
_variable_export_helper(args.file)
def _import_helper(filepath):
"""Helps import variables from the file."""
with open(filepath) as varfile:
data = varfile.read()
try:
var_json = json.loads(data)
except JSONDecodeError:
raise SystemExit("Invalid variables file.")
else:
suc_count = fail_count = 0
for k, v in var_json.items():
try:
Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e:
print(f"Variable import failed: {repr(e)}")
fail_count += 1
else:
suc_count += 1
print(f"{suc_count} of {len(var_json)} variables successfully updated.")
if fail_count:
print(f"{fail_count} variable(s) failed to be updated.")
def _variable_export_helper(filepath):
"""Helps export all the variables to the file."""
var_dict = {}
with create_session() as session:
qry = session.scalars(select(Variable))
data = json.JSONDecoder()
for var in qry:
try:
val = data.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
with open(filepath, "w") as varfile:
varfile.write(json.dumps(var_dict, sort_keys=True, indent=4))
print(f"{len(var_dict)} variables successfully exported to {filepath}")
| 4,043 | 31.352 | 98 | py |
airflow | airflow-main/airflow/cli/commands/internal_api_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal API command."""
from __future__ import annotations
import logging
import os
import signal
import subprocess
import sys
import textwrap
from contextlib import suppress
from tempfile import gettempdir
from time import sleep
import daemon
import psutil
from daemon.pidfile import TimeoutPIDLockFile
from flask import Flask
from flask_appbuilder import SQLA
from flask_caching import Cache
from flask_wtf.csrf import CSRFProtect
from lockfile.pidlockfile import read_pid_from_pidfile
from sqlalchemy.engine.url import make_url
from airflow import settings
from airflow.api_internal.internal_api_call import InternalApiConfig
from airflow.cli.commands.webserver_command import GunicornMonitor
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.logging_config import configure_logging
from airflow.models import import_all_models
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.process_utils import check_if_pidfile_process_is_running
from airflow.www.extensions.init_dagbag import init_dagbag
from airflow.www.extensions.init_jinja_globals import init_jinja_globals
from airflow.www.extensions.init_manifest_files import configure_manifest_files
from airflow.www.extensions.init_security import init_xframe_protection
from airflow.www.extensions.init_views import init_api_internal, init_error_handlers
log = logging.getLogger(__name__)
app: Flask | None = None
@cli_utils.action_cli
def internal_api(args):
"""Starts Airflow Internal API."""
print(settings.HEADER)
access_logfile = args.access_logfile or "-"
error_logfile = args.error_logfile or "-"
access_logformat = args.access_logformat
num_workers = args.workers
worker_timeout = args.worker_timeout
if args.debug:
log.info(f"Starting the Internal API server on port {args.port} and host {args.hostname}.")
app = create_app(testing=conf.getboolean("core", "unit_test_mode"))
app.run(
debug=True,
use_reloader=not app.config["TESTING"],
port=args.port,
host=args.hostname,
)
else:
pid_file, stdout, stderr, log_file = setup_locations(
"internal-api", args.pid, args.stdout, args.stderr, args.log_file
)
        # Check if Internal API is already running; if not, remove the old pidfile
check_if_pidfile_process_is_running(pid_file=pid_file, process_name="internal-api")
log.info(
textwrap.dedent(
f"""\
Running the Gunicorn Server with:
Workers: {num_workers} {args.workerclass}
Host: {args.hostname}:{args.port}
Timeout: {worker_timeout}
Logfiles: {access_logfile} {error_logfile}
Access Logformat: {access_logformat}
================================================================="""
)
)
run_args = [
sys.executable,
"-m",
"gunicorn",
"--workers",
str(num_workers),
"--worker-class",
str(args.workerclass),
"--timeout",
str(worker_timeout),
"--bind",
args.hostname + ":" + str(args.port),
"--name",
"airflow-internal-api",
"--pid",
pid_file,
"--access-logfile",
str(access_logfile),
"--error-logfile",
str(error_logfile),
]
if args.access_logformat and args.access_logformat.strip():
run_args += ["--access-logformat", str(args.access_logformat)]
if args.daemon:
run_args += ["--daemon"]
run_args += ["airflow.cli.commands.internal_api_command:cached_app()"]
        # To prevent different workers from creating the web app and
        # all writing to the database at the same time, we use the --preload option.
        # With the --preload option, the app is loaded before the workers are forked, and each worker
        # then has a copy of the app.
run_args += ["--preload"]
gunicorn_master_proc: psutil.Process | None = None
def kill_proc(signum, _):
log.info("Received signal: %s. Closing gunicorn.", signum)
gunicorn_master_proc.terminate()
with suppress(TimeoutError):
gunicorn_master_proc.wait(timeout=30)
if gunicorn_master_proc.is_running():
gunicorn_master_proc.kill()
sys.exit(0)
def monitor_gunicorn(gunicorn_master_pid: int):
# Register signal handlers
signal.signal(signal.SIGINT, kill_proc)
signal.signal(signal.SIGTERM, kill_proc)
# These run forever until SIG{INT, TERM, KILL, ...} signal is sent
GunicornMonitor(
gunicorn_master_pid=gunicorn_master_pid,
num_workers_expected=num_workers,
master_timeout=120,
worker_refresh_interval=30,
worker_refresh_batch_size=1,
reload_on_plugin_change=False,
).start()
if args.daemon:
            # This makes sure possible errors are reported before daemonization
os.environ["SKIP_DAGS_PARSING"] = "True"
app = create_app(None)
os.environ.pop("SKIP_DAGS_PARSING")
handle = setup_logging(log_file)
base, ext = os.path.splitext(pid_file)
with open(stdout, "a") as stdout, open(stderr, "a") as stderr:
stdout.truncate(0)
stderr.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
subprocess.Popen(run_args, close_fds=True)
                # Reading pid of gunicorn main process as it will be different from
                # the one of the process spawned above.
while True:
sleep(0.1)
gunicorn_master_proc_pid = read_pid_from_pidfile(pid_file)
if gunicorn_master_proc_pid:
break
# Run Gunicorn monitor
gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
monitor_gunicorn(gunicorn_master_proc.pid)
else:
with subprocess.Popen(run_args, close_fds=True) as gunicorn_master_proc:
monitor_gunicorn(gunicorn_master_proc.pid)
def create_app(config=None, testing=False):
"""Create a new instance of Airflow Internal API app."""
flask_app = Flask(__name__)
flask_app.config["APP_NAME"] = "Airflow Internal API"
flask_app.config["TESTING"] = testing
flask_app.config["SQLALCHEMY_DATABASE_URI"] = conf.get("database", "SQL_ALCHEMY_CONN")
url = make_url(flask_app.config["SQLALCHEMY_DATABASE_URI"])
if url.drivername == "sqlite" and url.database and not url.database.startswith("/"):
raise AirflowConfigException(
f'Cannot use relative path: `{conf.get("database", "SQL_ALCHEMY_CONN")}` to connect to sqlite. '
"Please use absolute path such as `sqlite:////tmp/airflow.db`."
)
flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
flask_app.config["SESSION_COOKIE_HTTPONLY"] = True
flask_app.config["SESSION_COOKIE_SAMESITE"] = "Lax"
if config:
flask_app.config.from_mapping(config)
if "SQLALCHEMY_ENGINE_OPTIONS" not in flask_app.config:
flask_app.config["SQLALCHEMY_ENGINE_OPTIONS"] = settings.prepare_engine_args()
InternalApiConfig.force_database_direct_access()
csrf = CSRFProtect()
csrf.init_app(flask_app)
db = SQLA()
db.session = settings.Session
db.init_app(flask_app)
init_dagbag(flask_app)
cache_config = {"CACHE_TYPE": "flask_caching.backends.filesystem", "CACHE_DIR": gettempdir()}
Cache(app=flask_app, config=cache_config)
configure_logging()
configure_manifest_files(flask_app)
import_all_models()
with flask_app.app_context():
init_error_handlers(flask_app)
init_api_internal(flask_app, standalone_api=True)
init_jinja_globals(flask_app)
init_xframe_protection(flask_app)
return flask_app
def cached_app(config=None, testing=False):
"""Return cached instance of Airflow Internal API app."""
global app
if not app:
app = create_app(config=config, testing=testing)
return app
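# Illustrative sketch (editor-added, not part of the original module): the same
# cached_app() callable that the Gunicorn entrypoint above points at can also be
# exercised directly, e.g. in a smoke test. This assumes a fully configured
# Airflow environment (valid [database] sql_alchemy_conn); the route below is an
# assumed example path, not a documented endpoint.
if __name__ == "__main__":
    smoke_app = cached_app(testing=True)
    with smoke_app.test_client() as client:
        response = client.get("/internal_api/v1/health")
        print(response.status_code)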
| 9,634 | 35.634981 | 108 | py |
airflow | airflow-main/airflow/cli/commands/kerberos_command.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Kerberos command."""
from __future__ import annotations
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.security import kerberos as krb
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations
@cli_utils.action_cli
def kerberos(args):
"""Start a kerberos ticket renewer."""
print(settings.HEADER)
if args.daemon:
pid, stdout, stderr, _ = setup_locations(
"kerberos", args.pid, args.stdout, args.stderr, args.log_file
)
with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
stdout_handle.truncate(0)
stderr_handle.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
stdout=stdout_handle,
stderr=stderr_handle,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
krb.run(principal=args.principal, keytab=args.keytab)
else:
krb.run(principal=args.principal, keytab=args.keytab)
| 1,901 | 34.886792 | 84 | py |
airflow | airflow-main/airflow/ti_deps/dependencies_deps.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.dependencies_states import (
BACKFILL_QUEUEABLE_STATES,
QUEUEABLE_STATES,
RUNNABLE_STATES,
SCHEDULEABLE_STATES,
)
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from airflow.ti_deps.deps.dag_unpaused_dep import DagUnpausedDep
from airflow.ti_deps.deps.dagrun_backfill_dep import DagRunNotBackfillDep
from airflow.ti_deps.deps.dagrun_exists_dep import DagrunRunningDep
from airflow.ti_deps.deps.exec_date_after_start_date_dep import ExecDateAfterStartDateDep
from airflow.ti_deps.deps.pool_slots_available_dep import PoolSlotsAvailableDep
from airflow.ti_deps.deps.runnable_exec_date_dep import RunnableExecDateDep
from airflow.ti_deps.deps.task_concurrency_dep import TaskConcurrencyDep
from airflow.ti_deps.deps.task_not_running_dep import TaskNotRunningDep
from airflow.ti_deps.deps.valid_state_dep import ValidStateDep
# Context to get the dependencies that need to be met in order for a task instance to be
# set to 'scheduled' state.
SCHEDULED_DEPS = {
RunnableExecDateDep(),
ValidStateDep(SCHEDULEABLE_STATES),
TaskNotRunningDep(),
}
# Dependencies that if met, task instance should be re-queued.
REQUEUEABLE_DEPS = {
DagTISlotsAvailableDep(),
TaskConcurrencyDep(),
PoolSlotsAvailableDep(),
}
# Dependencies that need to be met for a given task instance to be set to 'RUNNING' state.
RUNNING_DEPS = {
RunnableExecDateDep(),
ValidStateDep(RUNNABLE_STATES),
DagTISlotsAvailableDep(),
TaskConcurrencyDep(),
PoolSlotsAvailableDep(),
TaskNotRunningDep(),
}
BACKFILL_QUEUED_DEPS = {
RunnableExecDateDep(),
ValidStateDep(BACKFILL_QUEUEABLE_STATES),
DagrunRunningDep(),
TaskNotRunningDep(),
}
# TODO(aoen): SCHEDULER_QUEUED_DEPS is not coupled to actual scheduling/execution
# in any way and could easily be modified or removed from the scheduler causing
# this dependency to become outdated and incorrect. This coupling should be created
# (e.g. via a dag_deps analog of ti_deps that will be used in the scheduler code,
# or allow batch deps checks) to ensure that the logic here is equivalent to the logic
# in the scheduler.
# Right now there's one discrepancy between this context and how scheduler schedule tasks:
# Scheduler will check if the executor has the task instance--it is not possible
# to check the executor outside scheduler main process.
# Dependencies that need to be met for a given task instance to be set to 'queued' state
# by the scheduler.
# This context has more DEPs than RUNNING_DEPS, as we can have task triggered by
# components other than scheduler, e.g. webserver.
SCHEDULER_QUEUED_DEPS = {
RunnableExecDateDep(),
ValidStateDep(QUEUEABLE_STATES),
DagTISlotsAvailableDep(),
TaskConcurrencyDep(),
PoolSlotsAvailableDep(),
DagrunRunningDep(),
DagRunNotBackfillDep(),
DagUnpausedDep(),
ExecDateAfterStartDateDep(),
TaskNotRunningDep(),
}
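# Illustrative sketch (editor-added, not part of the original module): a quick
# way to see which additional checks the scheduler applies before queueing a
# task compared to what is needed to actually run it. Assumes a working Airflow
# installation so the imports above resolve; deps are compared by class name.
if __name__ == "__main__":
    running_dep_names = {type(dep).__name__ for dep in RUNNING_DEPS}
    scheduler_only = sorted(
        type(dep).__name__ for dep in SCHEDULER_QUEUED_DEPS if type(dep).__name__ not in running_dep_names
    )
    print(scheduler_only)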
| 3,768 | 39.095745 | 90 | py |
airflow | airflow-main/airflow/ti_deps/dep_context.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
import attr
from sqlalchemy.orm.session import Session
from airflow.exceptions import TaskNotFound
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
@attr.define
class DepContext:
"""
A base class for dependency contexts.
Specifies which dependencies should be evaluated in the context for a task
instance to satisfy the requirements of the context. Also stores state
related to the context that can be used by dependency classes.
For example there could be a SomeRunContext that subclasses this class which has
dependencies for:
- Making sure there are slots available on the infrastructure to run the task instance
- A task-instance's task-specific dependencies are met (e.g. the previous task
instance completed successfully)
- ...
:param deps: The context-specific dependencies that need to be evaluated for a
task instance to run in this execution context.
:param flag_upstream_failed: This is a hack to generate the upstream_failed state
creation while checking to see whether the task instance is runnable. It was the
shortest path to add the feature. This is bad since this class should be pure (no
side effects).
:param ignore_all_deps: Whether or not the context should ignore all ignorable
dependencies. Overrides the other ignore_* parameters
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for
Backfills)
:param wait_for_past_depends_before_skipping: Wait for past depends before marking the ti as skipped
:param ignore_in_retry_period: Ignore the retry period for task instances
:param ignore_in_reschedule_period: Ignore the reschedule period for task instances
:param ignore_unmapped_tasks: Ignore errors about mapped tasks not yet being expanded
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and
trigger rule
:param ignore_ti_state: Ignore the task instance's previous failure/success
:param finished_tis: A list of all the finished task instances of this run
"""
deps: set = attr.ib(factory=set)
flag_upstream_failed: bool = False
ignore_all_deps: bool = False
ignore_depends_on_past: bool = False
wait_for_past_depends_before_skipping: bool = False
ignore_in_retry_period: bool = False
ignore_in_reschedule_period: bool = False
ignore_task_deps: bool = False
ignore_ti_state: bool = False
ignore_unmapped_tasks: bool = False
finished_tis: list[TaskInstance] | None = None
description: str | None = None
have_changed_ti_states: bool = False
"""Have any of the TIs state's been changed as a result of evaluating dependencies"""
def ensure_finished_tis(self, dag_run: DagRun, session: Session) -> list[TaskInstance]:
"""
Ensures finished_tis is populated if it's currently None, which allows running tasks without dag_run.
:param dag_run: The DagRun for which to find finished tasks
:return: A list of all the finished tasks of this DAG and execution_date
"""
if self.finished_tis is None:
finished_tis = dag_run.get_task_instances(state=State.finished, session=session)
for ti in finished_tis:
if not hasattr(ti, "task") and dag_run.dag:
try:
ti.task = dag_run.dag.get_task(ti.task_id)
except TaskNotFound:
pass
self.finished_tis = finished_tis
else:
finished_tis = self.finished_tis
return finished_tis
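# Illustrative sketch (editor-added, not part of the original module): building
# a context similar to what a "run this task anyway" code path might use. The
# particular combination of flags is hypothetical and only demonstrates the
# attrs-based constructor of DepContext.
if __name__ == "__main__":
    example_context = DepContext(
        flag_upstream_failed=False,
        ignore_task_deps=True,
        ignore_ti_state=True,
        description="example: ignore task-level deps and prior TI state",
    )
    print(example_context)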
| 4,594 | 42.349057 | 109 | py |
airflow | airflow-main/airflow/ti_deps/dependencies_states.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.utils.state import TaskInstanceState
EXECUTION_STATES = {
TaskInstanceState.RUNNING,
TaskInstanceState.QUEUED,
}
# In order to be able to get queued a task must have one of these states
SCHEDULEABLE_STATES = {
None,
TaskInstanceState.UP_FOR_RETRY,
TaskInstanceState.UP_FOR_RESCHEDULE,
}
RUNNABLE_STATES = {
# For cases like unit tests and run manually
None,
TaskInstanceState.UP_FOR_RETRY,
TaskInstanceState.UP_FOR_RESCHEDULE,
# For normal scheduler/backfill cases
TaskInstanceState.QUEUED,
}
QUEUEABLE_STATES = {
TaskInstanceState.SCHEDULED,
}
BACKFILL_QUEUEABLE_STATES = {
# For cases like unit tests and run manually
None,
TaskInstanceState.UP_FOR_RESCHEDULE,
TaskInstanceState.UP_FOR_RETRY,
# For normal backfill cases
TaskInstanceState.SCHEDULED,
}
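# Illustrative sketch (editor-added, not part of the original module): how the
# state sets above relate -- a QUEUED task instance is runnable, but it is not
# eligible to be scheduled again. Assumes Airflow is importable.
if __name__ == "__main__":
    print(TaskInstanceState.QUEUED in RUNNABLE_STATES)  # True
    print(TaskInstanceState.QUEUED in SCHEDULEABLE_STATES)  # False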
| 1,665 | 29.851852 | 72 | py |
airflow | airflow-main/airflow/ti_deps/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/ti_deps/deps/trigger_rule_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections
import collections.abc
import functools
from typing import TYPE_CHECKING, Iterator, NamedTuple
from sqlalchemy import and_, func, or_
from airflow.models import MappedOperator
from airflow.models.taskinstance import PAST_DEPENDS_MET
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep, TIDepStatus
from airflow.utils.state import TaskInstanceState
from airflow.utils.trigger_rule import TriggerRule as TR
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import ColumnOperators
from airflow.models.taskinstance import TaskInstance
class _UpstreamTIStates(NamedTuple):
"""States of the upstream tis for a specific ti.
This is used to determine whether the specific ti can run in this iteration.
"""
success: int
skipped: int
failed: int
upstream_failed: int
removed: int
done: int
success_setup: int
skipped_setup: int
@classmethod
def calculate(cls, finished_upstreams: Iterator[TaskInstance]) -> _UpstreamTIStates:
"""Calculate states for a task instance.
``counter`` is inclusive of ``setup_counter`` -- e.g. if there are 2 skipped upstreams, one
of which is a setup, then counter will show 2 skipped and setup counter will show 1.
        :param finished_upstreams: the finished upstream task instances of the
            ti that the states are being calculated for
"""
counter: dict[str, int] = collections.Counter()
setup_counter: dict[str, int] = collections.Counter()
for ti in finished_upstreams:
curr_state = {ti.state: 1}
counter.update(curr_state)
# setup task cannot be mapped
if not isinstance(ti.task, MappedOperator) and ti.task.is_setup:
setup_counter.update(curr_state)
return _UpstreamTIStates(
success=counter.get(TaskInstanceState.SUCCESS, 0),
skipped=counter.get(TaskInstanceState.SKIPPED, 0),
failed=counter.get(TaskInstanceState.FAILED, 0),
upstream_failed=counter.get(TaskInstanceState.UPSTREAM_FAILED, 0),
removed=counter.get(TaskInstanceState.REMOVED, 0),
done=sum(counter.values()),
success_setup=setup_counter.get(TaskInstanceState.SUCCESS, 0),
skipped_setup=setup_counter.get(TaskInstanceState.SKIPPED, 0),
)
class TriggerRuleDep(BaseTIDep):
"""Determines if a task's upstream tasks are in a state that allows a given task instance to run."""
NAME = "Trigger Rule"
IGNORABLE = True
IS_TASK_DEP = True
def _get_dep_statuses(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext,
) -> Iterator[TIDepStatus]:
# Checking that all upstream dependencies have succeeded.
if not ti.task.upstream_task_ids:
yield self._passing_status(reason="The task instance did not have any upstream tasks.")
return
if ti.task.trigger_rule == TR.ALWAYS:
yield self._passing_status(reason="The task had a always trigger rule set.")
return
yield from self._evaluate_trigger_rule(ti=ti, dep_context=dep_context, session=session)
def _evaluate_trigger_rule(
self,
*,
ti: TaskInstance,
dep_context: DepContext,
session: Session,
) -> Iterator[TIDepStatus]:
"""Evaluate whether ``ti``'s trigger rule was met.
:param ti: Task instance to evaluate the trigger rule of.
:param dep_context: The current dependency context.
:param session: Database session.
"""
from airflow.models.abstractoperator import NotMapped
from airflow.models.expandinput import NotFullyPopulated
from airflow.models.operator import needs_expansion
from airflow.models.taskinstance import TaskInstance
task = ti.task
upstream_tasks = {t.task_id: t for t in task.upstream_list}
trigger_rule = task.trigger_rule
@functools.lru_cache
def _get_expanded_ti_count() -> int:
"""Get how many tis the current task is supposed to be expanded into.
This extra closure allows us to query the database only when needed,
and at most once.
"""
return task.get_mapped_ti_count(ti.run_id, session=session)
@functools.lru_cache
def _get_relevant_upstream_map_indexes(upstream_id: str) -> int | range | None:
"""Get the given task's map indexes relevant to the current ti.
This extra closure allows us to query the database only when needed,
and at most once for each task (instead of once for each expanded
task instance of the same task).
"""
try:
expanded_ti_count = _get_expanded_ti_count()
except (NotFullyPopulated, NotMapped):
return None
return ti.get_relevant_upstream_map_indexes(
upstream_tasks[upstream_id],
expanded_ti_count,
session=session,
)
def _is_relevant_upstream(upstream: TaskInstance) -> bool:
"""Whether a task instance is a "relevant upstream" of the current task."""
# Not actually an upstream task.
if upstream.task_id not in task.upstream_task_ids:
return False
# The current task is not in a mapped task group. All tis from an
# upstream task are relevant.
if task.get_closest_mapped_task_group() is None:
return True
# The upstream ti is not expanded. The upstream may be mapped or
# not, but the ti is relevant either way.
if upstream.map_index < 0:
return True
# Now we need to perform fine-grained check on whether this specific
# upstream ti's map index is relevant.
relevant = _get_relevant_upstream_map_indexes(upstream.task_id)
if relevant is None:
return True
if relevant == upstream.map_index:
return True
if isinstance(relevant, collections.abc.Container) and upstream.map_index in relevant:
return True
return False
finished_upstream_tis = (
finished_ti
for finished_ti in dep_context.ensure_finished_tis(ti.get_dagrun(session), session)
if _is_relevant_upstream(finished_ti)
)
upstream_states = _UpstreamTIStates.calculate(finished_upstream_tis)
success = upstream_states.success
skipped = upstream_states.skipped
failed = upstream_states.failed
upstream_failed = upstream_states.upstream_failed
removed = upstream_states.removed
done = upstream_states.done
success_setup = upstream_states.success_setup
skipped_setup = upstream_states.skipped_setup
def _iter_upstream_conditions() -> Iterator[ColumnOperators]:
# Optimization: If the current task is not in a mapped task group,
# it depends on all upstream task instances.
if task.get_closest_mapped_task_group() is None:
yield TaskInstance.task_id.in_(upstream_tasks)
return
# Otherwise we need to figure out which map indexes are depended on
# for each upstream by the current task instance.
for upstream_id in upstream_tasks:
map_indexes = _get_relevant_upstream_map_indexes(upstream_id)
if map_indexes is None: # All tis of this upstream are dependencies.
yield (TaskInstance.task_id == upstream_id)
continue
# At this point we know we want to depend on only selected tis
# of this upstream task. Since the upstream may not have been
# expanded at this point, we also depend on the non-expanded ti
# to ensure at least one ti is included for the task.
yield and_(TaskInstance.task_id == upstream_id, TaskInstance.map_index < 0)
if isinstance(map_indexes, range) and map_indexes.step == 1:
yield and_(
TaskInstance.task_id == upstream_id,
TaskInstance.map_index >= map_indexes.start,
TaskInstance.map_index < map_indexes.stop,
)
elif isinstance(map_indexes, collections.abc.Container):
yield and_(TaskInstance.task_id == upstream_id, TaskInstance.map_index.in_(map_indexes))
else:
yield and_(TaskInstance.task_id == upstream_id, TaskInstance.map_index == map_indexes)
# Optimization: Don't need to hit the database if all upstreams are
# "simple" tasks (no task or task group mapping involved).
if not any(needs_expansion(t) for t in upstream_tasks.values()):
upstream = len(upstream_tasks)
upstream_setup = len(
[x for x in upstream_tasks.values() if not isinstance(x, MappedOperator) and x.is_setup]
)
else:
upstream = (
session.query(func.count())
.filter(TaskInstance.dag_id == ti.dag_id, TaskInstance.run_id == ti.run_id)
.filter(or_(*_iter_upstream_conditions()))
.scalar()
)
# todo: add support for mapped setup?
upstream_setup = None
upstream_done = done >= upstream
changed = False
new_state = None
if dep_context.flag_upstream_failed:
if trigger_rule == TR.ALL_SUCCESS:
if upstream_failed or failed:
new_state = TaskInstanceState.UPSTREAM_FAILED
elif skipped:
new_state = TaskInstanceState.SKIPPED
elif removed and success and ti.map_index > -1:
if ti.map_index >= success:
new_state = TaskInstanceState.REMOVED
elif trigger_rule == TR.ALL_FAILED:
if success or skipped:
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.ONE_SUCCESS:
if upstream_done and done == skipped:
# if upstream is done and all are skipped mark as skipped
new_state = TaskInstanceState.SKIPPED
elif upstream_done and success <= 0:
# if upstream is done and there are no success mark as upstream failed
new_state = TaskInstanceState.UPSTREAM_FAILED
elif trigger_rule == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.ONE_DONE:
if upstream_done and not (failed or success):
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.NONE_FAILED:
if upstream_failed or failed:
new_state = TaskInstanceState.UPSTREAM_FAILED
elif trigger_rule == TR.NONE_FAILED_MIN_ONE_SUCCESS:
if upstream_failed or failed:
new_state = TaskInstanceState.UPSTREAM_FAILED
elif skipped == upstream:
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.NONE_SKIPPED:
if skipped:
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.ALL_SKIPPED:
if success or failed:
new_state = TaskInstanceState.SKIPPED
elif trigger_rule == TR.ALL_DONE_SETUP_SUCCESS:
if upstream_done and upstream_setup and skipped_setup >= upstream_setup:
# when there is an upstream setup and they have all skipped, then skip
new_state = TaskInstanceState.SKIPPED
elif upstream_done and upstream_setup and success_setup == 0:
# when there is an upstream setup, if none succeeded, mark upstream failed
# if at least one setup ran, we'll let it run
new_state = TaskInstanceState.UPSTREAM_FAILED
if new_state is not None:
if new_state == TaskInstanceState.SKIPPED and dep_context.wait_for_past_depends_before_skipping:
past_depends_met = ti.xcom_pull(
task_ids=ti.task_id, key=PAST_DEPENDS_MET, session=session, default=False
)
if not past_depends_met:
yield self._failing_status(
reason=("Task should be skipped but the the past depends are not met")
)
return
changed = ti.set_state(new_state, session)
if changed:
dep_context.have_changed_ti_states = True
if trigger_rule == TR.ONE_SUCCESS:
if success <= 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires one upstream task success, "
f"but none were found. upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires one upstream task failure, "
f"but none were found. upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ONE_DONE:
if success + failed <= 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}'"
"requires at least one upstream task failure or success"
f"but none were failed or success. upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_SUCCESS:
num_failures = upstream - success
if ti.map_index > -1:
num_failures -= removed
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded, but found {num_failures} non-success(es). "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_FAILED:
num_success = upstream - failed - upstream_failed
if ti.map_index > -1:
num_success -= removed
if num_success > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have failed, "
f"but found {num_success} non-failure(s). "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"completed, but found {len(upstream_tasks) - done} task(s) that were not done. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_FAILED:
num_failures = upstream - success - skipped
if ti.map_index > -1:
num_failures -= removed
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded or been skipped, but found {num_failures} non-success(es). "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_FAILED_MIN_ONE_SUCCESS:
num_failures = upstream - success - skipped
if ti.map_index > -1:
num_failures -= removed
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded or been skipped, but found {num_failures} non-success(es). "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_SKIPPED:
if not upstream_done or (skipped > 0):
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to not have been "
f"skipped, but found {skipped} task(s) skipped. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_SKIPPED:
num_non_skipped = upstream - skipped
if num_non_skipped > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have been "
f"skipped, but found {num_non_skipped} task(s) in non skipped state. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_DONE_SETUP_SUCCESS:
if not upstream_done:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"completed, but found {len(upstream_tasks) - done} task(s) that were not done. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif upstream_setup is None: # for now, None only happens in mapped case
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' cannot have mapped tasks as upstream. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif upstream_setup and not success_setup >= 1:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires at least one upstream setup task be "
f"successful, but found {upstream_setup - success_setup} task(s) that were not. "
f"upstream_states={upstream_states}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
else:
yield self._failing_status(reason=f"No strategy to evaluate trigger rule '{trigger_rule}'.")
| 21,212 | 46.350446 | 109 | py |
airflow | airflow-main/airflow/ti_deps/deps/ready_to_reschedule.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.executors.executor_loader import ExecutorLoader
from airflow.models.taskreschedule import TaskReschedule
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.state import TaskInstanceState
class ReadyToRescheduleDep(BaseTIDep):
"""Determines whether a task is ready to be rescheduled."""
NAME = "Ready To Reschedule"
IGNORABLE = True
IS_TASK_DEP = True
RESCHEDULEABLE_STATES = {TaskInstanceState.UP_FOR_RESCHEDULE, None}
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
"""
Determines whether a task is ready to be rescheduled.
Only tasks in NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is considered as passed.
This dependency fails if the latest reschedule request's reschedule date is still
in the future.
"""
from airflow.models.mappedoperator import MappedOperator
is_mapped = isinstance(ti.task, MappedOperator)
executor, _ = ExecutorLoader.import_default_executor_cls()
if (
# Mapped sensors don't have the reschedule property (it can only be calculated after unmapping),
# so we don't check them here. They are handled below by checking TaskReschedule instead.
not is_mapped
and not getattr(ti.task, "reschedule", False)
# Executors can force running in reschedule mode,
# in which case we ignore the value of the task property.
and not executor.change_sensor_mode_to_reschedule
):
yield self._passing_status(reason="Task is not in reschedule mode.")
return
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was permitted."
)
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state."
)
return
task_reschedule = (
TaskReschedule.query_for_task_instance(task_instance=ti, descending=True, session=session)
.with_entities(TaskReschedule.reschedule_date)
.first()
)
if not task_reschedule:
# Because mapped sensors don't have the reschedule property, here's the last resort
# and we need a slightly different passing reason
if is_mapped:
yield self._passing_status(reason="The task is mapped and not in reschedule mode")
return
yield self._passing_status(reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
next_reschedule_date = task_reschedule.reschedule_date
if now >= next_reschedule_date:
yield self._passing_status(reason="Task instance id ready for reschedule.")
return
yield self._failing_status(
reason=(
"Task is not ready for reschedule yet but will be rescheduled automatically. "
f"Current date is {now.isoformat()} and task will be "
f"rescheduled at {next_reschedule_date.isoformat()}."
)
)
| 4,333 | 41.910891 | 108 | py |
airflow | airflow-main/airflow/ti_deps/deps/dagrun_exists_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.state import DagRunState
class DagrunRunningDep(BaseTIDep):
"""Determines whether a task's DagRun is in valid state."""
NAME = "Dagrun Running"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
dr = ti.get_dagrun(session)
if dr.state != DagRunState.RUNNING:
yield self._failing_status(
reason=f"Task instance's dagrun was not in the 'running' state but in the state '{dr.state}'."
)
| 1,449 | 37.157895 | 110 | py |
airflow | airflow-main/airflow/ti_deps/deps/runnable_exec_date_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils import timezone
from airflow.utils.session import provide_session
class RunnableExecDateDep(BaseTIDep):
"""Determines whether a task's execution date is valid."""
NAME = "Execution Date"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
cur_date = timezone.utcnow()
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
logical_date = ti.get_dagrun(session).execution_date
if logical_date > cur_date and not ti.task.dag.allow_future_exec_dates:
yield self._failing_status(
reason=(
f"Execution date {logical_date.isoformat()} is in the future "
f"(the current date is {cur_date.isoformat()})."
)
)
if ti.task.end_date and logical_date > ti.task.end_date:
yield self._failing_status(
reason=(
f"The execution date is {logical_date.isoformat()} but this is "
f"after the task's end date {ti.task.end_date.isoformat()}."
)
)
if ti.task.dag and ti.task.dag.end_date and logical_date > ti.task.dag.end_date:
yield self._failing_status(
reason=(
f"The execution date is {logical_date.isoformat()} but this is after "
f"the task's DAG's end date {ti.task.dag.end_date.isoformat()}."
)
)
| 2,446 | 39.114754 | 90 | py |
airflow | airflow-main/airflow/ti_deps/deps/dagrun_backfill_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module defines dep for making sure DagRun not a backfill."""
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.types import DagRunType
class DagRunNotBackfillDep(BaseTIDep):
"""Dep for valid DagRun run_id to schedule from scheduler."""
NAME = "DagRun is not backfill job"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context=None):
"""
Determines if the DagRun is valid for scheduling from scheduler.
:param ti: the task instance to get the dependency status for
:param session: database session
:param dep_context: the context for which this dependency should be evaluated for
:return: True if DagRun is valid for scheduling from scheduler.
"""
dagrun = ti.get_dagrun(session)
if dagrun.run_type == DagRunType.BACKFILL_JOB:
yield self._failing_status(
reason=f"Task's DagRun run_type is {dagrun.run_type} and cannot be run by the scheduler"
)
| 1,921 | 39.041667 | 104 | py |
airflow | airflow-main/airflow/ti_deps/deps/not_previously_skipped_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.models.taskinstance import PAST_DEPENDS_MET
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
class NotPreviouslySkippedDep(BaseTIDep):
"""
Determine if this task should be skipped.
    A task should be skipped when any of its direct upstream relatives have decided that
    this task should be skipped.
"""
NAME = "Not Previously Skipped"
IGNORABLE = True
IS_TASK_DEP = True
def _get_dep_statuses(self, ti, session, dep_context):
from airflow.models.skipmixin import (
XCOM_SKIPMIXIN_FOLLOWED,
XCOM_SKIPMIXIN_KEY,
XCOM_SKIPMIXIN_SKIPPED,
SkipMixin,
)
from airflow.utils.state import TaskInstanceState
upstream = ti.task.get_direct_relatives(upstream=True)
finished_tis = dep_context.ensure_finished_tis(ti.get_dagrun(session), session)
finished_task_ids = {t.task_id for t in finished_tis}
for parent in upstream:
if isinstance(parent, SkipMixin):
if parent.task_id not in finished_task_ids:
# This can happen if the parent task has not yet run.
continue
prev_result = ti.xcom_pull(task_ids=parent.task_id, key=XCOM_SKIPMIXIN_KEY, session=session)
if prev_result is None:
# This can happen if the parent task has not yet run.
continue
should_skip = False
if (
XCOM_SKIPMIXIN_FOLLOWED in prev_result
and ti.task_id not in prev_result[XCOM_SKIPMIXIN_FOLLOWED]
):
# Skip any tasks that are not in "followed"
should_skip = True
elif (
XCOM_SKIPMIXIN_SKIPPED in prev_result
and ti.task_id in prev_result[XCOM_SKIPMIXIN_SKIPPED]
):
# Skip any tasks that are in "skipped"
should_skip = True
if should_skip:
# If the parent SkipMixin has run, and the XCom result stored indicates this
# ti should be skipped, set ti.state to SKIPPED and fail the rule so that the
# ti does not execute.
if dep_context.wait_for_past_depends_before_skipping:
past_depends_met = ti.xcom_pull(
task_ids=ti.task_id, key=PAST_DEPENDS_MET, session=session, default=False
)
if not past_depends_met:
yield self._failing_status(
reason=("Task should be skipped but the the past depends are not met")
)
return
ti.set_state(TaskInstanceState.SKIPPED, session)
yield self._failing_status(
reason=f"Skipping because of previous XCom result from parent task {parent.task_id}"
)
return
| 3,909 | 40.157895 | 108 | py |
airflow | airflow-main/airflow/ti_deps/deps/not_in_retry_period_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.state import TaskInstanceState
class NotInRetryPeriodDep(BaseTIDep):
"""Determines whether a task is not in retry period."""
NAME = "Not In Retry Period"
IGNORABLE = True
IS_TASK_DEP = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_in_retry_period:
yield self._passing_status(
reason="The context specified that being in a retry period was permitted."
)
return
if ti.state != TaskInstanceState.UP_FOR_RETRY:
yield self._passing_status(reason="The task instance was not marked for retrying.")
return
# Calculate the date first so that it is always smaller than the timestamp used by
# ready_for_retry
cur_date = timezone.utcnow()
next_task_retry_date = ti.next_retry_datetime()
if ti.is_premature:
yield self._failing_status(
reason=(
f"Task is not ready for retry yet but will be retried automatically. "
f"Current date is {cur_date.isoformat()} and task will be retried "
f"at {next_task_retry_date.isoformat()}."
)
)
| 2,231 | 38.157895 | 95 | py |
airflow | airflow-main/airflow/ti_deps/deps/base_ti_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterator, NamedTuple
from airflow.ti_deps.dep_context import DepContext
from airflow.utils.session import provide_session
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from airflow.models.taskinstance import TaskInstance
class BaseTIDep:
"""
Abstract base class for task instances dependencies.
All dependencies must be satisfied in order for task instances to run.
For example, a task that can only run if a certain number of its upstream tasks succeed.
This is an abstract class and must be subclassed to be used.
"""
# If this dependency can be ignored by a context in which it is added to. Needed
# because some dependencies should never be ignorable in their contexts.
IGNORABLE = False
# Whether this dependency is not a global task instance dependency but specific
# to some tasks (e.g. depends_on_past is not specified by all tasks).
IS_TASK_DEP = False
def __eq__(self, other: Any) -> bool:
return isinstance(self, type(other))
def __hash__(self) -> int:
return hash(type(self))
def __repr__(self) -> str:
return f"<TIDep({self.name})>"
@property
def name(self) -> str:
"""The human-readable name for the dependency.
Use the class name as the default if ``NAME`` is not provided.
"""
return getattr(self, "NAME", self.__class__.__name__)
def _get_dep_statuses(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext,
) -> Iterator[TIDepStatus]:
"""
Abstract method that returns an iterable of TIDepStatus objects.
Each object describes whether the given task instance has this dependency met.
For example a subclass could return an iterable of TIDepStatus objects, each one
representing if each of the passed in task's upstream tasks succeeded or not.
:param ti: the task instance to get the dependency status for
:param session: database session
        :param dep_context: the context in which this dependency should be evaluated
"""
raise NotImplementedError
@provide_session
def get_dep_statuses(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext | None = None,
) -> Iterator[TIDepStatus]:
"""
Wrapper around the private _get_dep_statuses method.
Contains some global checks for all dependencies.
:param ti: the task instance to get the dependency status for
:param session: database session
        :param dep_context: the context in which this dependency should be evaluated
"""
cxt = DepContext() if dep_context is None else dep_context
if self.IGNORABLE and cxt.ignore_all_deps:
yield self._passing_status(reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and cxt.ignore_task_deps:
yield self._passing_status(reason="Context specified all task dependencies should be ignored.")
return
yield from self._get_dep_statuses(ti, session, cxt)
@provide_session
def is_met(self, ti: TaskInstance, session: Session, dep_context: DepContext | None = None) -> bool:
"""
Returns whether a dependency is met for a given task instance.
A dependency is considered met if all the dependency statuses it reports are passing.
:param ti: the task instance to see if this dependency is met for
:param session: database session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
"""
return all(status.passed for status in self.get_dep_statuses(ti, session, dep_context))
@provide_session
def get_failure_reasons(
self,
ti: TaskInstance,
session: Session,
dep_context: DepContext | None = None,
) -> Iterator[str]:
"""
Returns an iterable of strings that explain why this dependency wasn't met.
:param ti: the task instance to see if this dependency is met for
:param session: database session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
"""
for dep_status in self.get_dep_statuses(ti, session, dep_context):
if not dep_status.passed:
yield dep_status.reason
def _failing_status(self, reason: str = "") -> TIDepStatus:
return TIDepStatus(self.name, False, reason)
def _passing_status(self, reason: str = "") -> TIDepStatus:
return TIDepStatus(self.name, True, reason)
class TIDepStatus(NamedTuple):
"""Dependency status for a task instance indicating whether the task instance passed the dependency."""
dep_name: str
passed: bool
reason: str
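# Illustrative sketch (not part of Airflow): a custom dependency subclasses BaseTIDep and
# yields TIDepStatus objects from _get_dep_statuses, for example:
#
#     class WeekdayOnlyDep(BaseTIDep):
#         """Hypothetical dep that only lets tasks run Monday-Friday."""
#
#         NAME = "Weekday Only"
#         IGNORABLE = True
#
#         def _get_dep_statuses(self, ti, session, dep_context):
#             if ti.execution_date.weekday() < 5:
#                 yield self._passing_status(reason="It is a weekday.")
#             else:
#                 yield self._failing_status(reason="Task only runs on weekdays.")
#
# Callers then use is_met()/get_failure_reasons(), which wrap _get_dep_statuses above.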
| 5,854 | 35.823899 | 107 | py |
airflow | airflow-main/airflow/ti_deps/deps/valid_state_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.exceptions import AirflowException
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class ValidStateDep(BaseTIDep):
"""
Ensures that the task instance's state is in a given set of valid states.
:param valid_states: A list of valid states that a task instance can have to meet
this dependency.
:return: whether or not the task instance's state is valid
"""
NAME = "Task Instance State"
IGNORABLE = True
def __init__(self, valid_states):
super().__init__()
if not valid_states:
raise AirflowException("ValidStatesDep received an empty set of valid states.")
self._valid_states = valid_states
def __eq__(self, other):
return isinstance(self, type(other)) and self._valid_states == other._valid_states
def __hash__(self):
return hash((type(self), tuple(self._valid_states)))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_ti_state:
yield self._passing_status(reason="Context specified that state should be ignored.")
return
if ti.state in self._valid_states:
yield self._passing_status(reason=f"Task state {ti.state} was valid.")
return
yield self._failing_status(reason=f"Task is in the '{ti.state}' state.")
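# Illustrative example (not part of this module): ValidStateDep({TaskInstanceState.QUEUED})
# would pass only for task instances that are currently in the QUEUED state.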
| 2,236 | 35.672131 | 96 | py |
airflow | airflow-main/airflow/ti_deps/deps/prev_dagrun_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import func
from airflow.models.taskinstance import PAST_DEPENDS_MET, TaskInstance as TI
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.state import TaskInstanceState
class PrevDagrunDep(BaseTIDep):
"""
    Determines whether the past dagrun is in a state that allows this task instance to run.
    For example, if we are depending on past, did this task instance's task complete
    in the previous dagrun?
"""
NAME = "Previous Dagrun State"
IGNORABLE = True
IS_TASK_DEP = True
@staticmethod
def _push_past_deps_met_xcom_if_needed(ti: TI, dep_context):
if dep_context.wait_for_past_depends_before_skipping:
ti.xcom_push(key=PAST_DEPENDS_MET, value=True)
@provide_session
def _get_dep_statuses(self, ti: TI, session, dep_context):
if dep_context.ignore_depends_on_past:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
reason = "The context specified that the state of past DAGs could be ignored."
yield self._passing_status(reason=reason)
return
if not ti.task.depends_on_past:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="The task did not have depends_on_past set.")
return
dr = ti.get_dagrun(session=session)
if not dr:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance does not belong to a DAG.")
return
# Don't depend on the previous task instance if we are the first task.
catchup = ti.task.dag and ti.task.dag.catchup
if catchup:
last_dagrun = dr.get_previous_scheduled_dagrun(session)
else:
last_dagrun = dr.get_previous_dagrun(session=session)
# First ever run for this DAG.
if not last_dagrun:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance was the first task instance for its task.")
return
# There was a DAG run, but the task wasn't active back then.
if catchup and last_dagrun.execution_date < ti.task.start_date:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance was the first task instance for its task.")
return
previous_ti = last_dagrun.get_task_instance(ti.task_id, map_index=ti.map_index, session=session)
if not previous_ti:
if ti.task.ignore_first_depends_on_past:
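                # Check whether this task has ever had a task instance in an earlier run; if it
                # has not, this really is the task's first instance and the flag lets it pass.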
has_historical_ti = (
session.query(func.count(TI.dag_id))
.filter(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date < ti.execution_date,
)
.scalar()
> 0
)
if not has_historical_ti:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(
reason="ignore_first_depends_on_past is true for this task "
"and it is the first task instance for its task."
)
return
yield self._failing_status(
reason="depends_on_past is true for this task's DAG, but the previous "
"task instance has not run yet."
)
return
if previous_ti.state not in {TaskInstanceState.SKIPPED, TaskInstanceState.SUCCESS}:
yield self._failing_status(
reason=(
f"depends_on_past is true for this task, but the previous task instance {previous_ti} "
f"is in the state '{previous_ti.state}' which is not a successful state."
)
)
return
previous_ti.task = ti.task
if ti.task.wait_for_downstream and not previous_ti.are_dependents_done(session=session):
yield self._failing_status(
reason=(
f"The tasks downstream of the previous task instance {previous_ti} haven't completed "
f"(and wait_for_downstream is True)."
)
)
return
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
| 5,387 | 40.767442 | 109 | py |
airflow | airflow-main/airflow/ti_deps/deps/mapped_task_expanded.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
class MappedTaskIsExpanded(BaseTIDep):
"""Checks that a mapped task has been expanded before it's TaskInstance can run."""
NAME = "Task has been mapped"
IGNORABLE = False
IS_TASK_DEP = False
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_unmapped_tasks:
return
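        # A map_index of -1 means the mapped task has not been expanded into concrete
        # task instances yet.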
if ti.map_index == -1:
yield self._failing_status(reason="The task has yet to be mapped!")
return
yield self._passing_status(reason="The task has been mapped")
| 1,413 | 37.216216 | 87 | py |
airflow | airflow-main/airflow/ti_deps/deps/dag_unpaused_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class DagUnpausedDep(BaseTIDep):
"""Determines whether a task's DAG is not paused."""
NAME = "Dag Not Paused"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.get_is_paused(session):
yield self._failing_status(reason=f"Task's DAG '{ti.dag_id}' is paused.")
| 1,284 | 36.794118 | 85 | py |
airflow | airflow-main/airflow/ti_deps/deps/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implementation of specific dependencies for tasks."""
| 844 | 43.473684 | 62 | py |
airflow | airflow-main/airflow/ti_deps/deps/dag_ti_slots_available_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class DagTISlotsAvailableDep(BaseTIDep):
"""Determines whether a DAG maximum number of running tasks has been reached."""
NAME = "Task Instance Slots Available"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.get_concurrency_reached(session):
yield self._failing_status(
reason=(
f"The maximum number of running tasks ({ti.task.dag.max_active_tasks}) for "
f"this task's DAG '{ti.dag_id}' has been reached."
)
)
| 1,524 | 38.102564 | 96 | py |
airflow | airflow-main/airflow/ti_deps/deps/task_concurrency_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class TaskConcurrencyDep(BaseTIDep):
"""This restricts the number of running task instances for a particular task."""
NAME = "Task Concurrency"
IGNORABLE = True
IS_TASK_DEP = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.max_active_tis_per_dag is None and ti.task.max_active_tis_per_dagrun is None:
yield self._passing_status(reason="Task concurrency is not set.")
return
if (
ti.task.max_active_tis_per_dag is not None
and ti.get_num_running_task_instances(session) >= ti.task.max_active_tis_per_dag
):
yield self._failing_status(reason="The max task concurrency has been reached.")
return
if (
ti.task.max_active_tis_per_dagrun is not None
and ti.get_num_running_task_instances(session, same_dagrun=True)
>= ti.task.max_active_tis_per_dagrun
):
yield self._failing_status(reason="The max task concurrency per run has been reached.")
return
yield self._passing_status(reason="The max task concurrency has not been reached.")
| 2,120 | 39.788462 | 99 | py |
airflow | airflow-main/airflow/ti_deps/deps/task_not_running_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contains the TaskNotRunningDep."""
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.state import TaskInstanceState
class TaskNotRunningDep(BaseTIDep):
"""Ensures that the task instance's state is not running."""
NAME = "Task Instance Not Running"
IGNORABLE = False
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context=None):
if ti.state != TaskInstanceState.RUNNING:
yield self._passing_status(reason="Task is not in running state.")
return
yield self._failing_status(reason="Task is in the running state")
| 1,618 | 34.977778 | 78 | py |
airflow | airflow-main/airflow/ti_deps/deps/pool_slots_available_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module defines dep for pool slots availability."""
from __future__ import annotations
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class PoolSlotsAvailableDep(BaseTIDep):
"""Dep for pool slots availability."""
NAME = "Pool Slots Available"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context=None):
"""
        Determines whether the pool this task instance is assigned to has available slots.
:param ti: the task instance to get the dependency status for
:param session: database session
        :param dep_context: the context in which this dependency should be evaluated
:return: True if there are available slots in the pool.
"""
from airflow.models.pool import Pool # To avoid a circular dependency
pool_name = ti.pool
pools = session.query(Pool).filter(Pool.pool == pool_name).all()
if not pools:
yield self._failing_status(
reason=f"Tasks using non-existent pool '{pool_name}' will not be scheduled"
)
return
else:
# Controlled by UNIQUE key in slot_pool table,
# only one result can be returned.
open_slots = pools[0].open_slots(session=session)
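            # If this task instance is already occupying slots (queued/running), add its own
            # slots back so it does not count against itself when the dependency is re-checked.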
if ti.state in EXECUTION_STATES:
open_slots += ti.pool_slots
if open_slots <= (ti.pool_slots - 1):
yield self._failing_status(
reason=f"Not scheduling since there are {open_slots} open slots in pool {pool_name} "
f"and require {ti.pool_slots} pool slots"
)
else:
yield self._passing_status(
reason=f"There are enough open slots in {pool_name} to execute the task",
)
| 2,693 | 38.043478 | 101 | py |
airflow | airflow-main/airflow/ti_deps/deps/exec_date_after_start_date_dep.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class ExecDateAfterStartDateDep(BaseTIDep):
"""Determines whether a task's execution date is after start date."""
NAME = "Execution Date"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.start_date and ti.execution_date < ti.task.start_date:
yield self._failing_status(
reason=(
f"The execution date is {ti.execution_date.isoformat()} but this is before "
f"the task's start date {ti.task.start_date.isoformat()}."
)
)
if ti.task.dag and ti.task.dag.start_date and ti.execution_date < ti.task.dag.start_date:
yield self._failing_status(
reason=(
f"The execution date is {ti.execution_date.isoformat()} but this is "
f"before the task's DAG's start date {ti.task.dag.start_date.isoformat()}."
)
)
| 1,908 | 39.617021 | 97 | py |
airflow | airflow-main/airflow/callbacks/pipe_callback_sink.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Callable
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
class PipeCallbackSink(BaseCallbackSink):
"""
    Class for sending callbacks to the DagProcessor over a multiprocessing pipe.
    It is used when the DagProcessor is not executed in standalone mode.
"""
def __init__(self, get_sink_pipe: Callable[[], MultiprocessingConnection]):
self._get_sink_pipe = get_sink_pipe
def send(self, callback: CallbackRequest):
"""
        Sends the callback to the DagProcessor for execution over the pipe.
:param callback: Callback request to be executed.
"""
try:
self._get_sink_pipe().send(callback)
except ConnectionError:
            # If the pipe died because of an error, it will be noticed and restarted
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
| 1,834 | 36.44898 | 79 | py |
airflow | airflow-main/airflow/callbacks/callback_requests.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from airflow.models.taskinstance import SimpleTaskInstance
class CallbackRequest:
"""
    Base class with information about the callback to be executed.
:param full_filepath: File Path to use to run the callback
:param msg: Additional Message that can be used for logging
:param processor_subdir: Directory used by Dag Processor when parsed the dag.
"""
def __init__(
self,
full_filepath: str,
processor_subdir: str | None = None,
msg: str | None = None,
):
self.full_filepath = full_filepath
self.processor_subdir = processor_subdir
self.msg = msg
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def __repr__(self):
return str(self.__dict__)
def to_json(self) -> str:
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, json_str: str):
json_object = json.loads(json_str)
return cls(**json_object)
class TaskCallbackRequest(CallbackRequest):
"""
Task callback status information.
    A class with information about the success/failure TI callback to be executed. Currently, only failure
    callbacks (when tasks are externally killed) and zombies are run via DagFileProcessorProcess.
:param full_filepath: File Path to use to run the callback
:param simple_task_instance: Simplified Task Instance representation
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging to determine failure/zombie
:param processor_subdir: Directory used by Dag Processor when parsed the dag.
"""
def __init__(
self,
full_filepath: str,
simple_task_instance: SimpleTaskInstance,
is_failure_callback: bool | None = True,
processor_subdir: str | None = None,
msg: str | None = None,
):
super().__init__(full_filepath=full_filepath, processor_subdir=processor_subdir, msg=msg)
self.simple_task_instance = simple_task_instance
self.is_failure_callback = is_failure_callback
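    # simple_task_instance is not plain-JSON serializable, so (de)serialization is routed
    # through Airflow's BaseSerialization instead of the base class's json.dumps approach.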
def to_json(self) -> str:
from airflow.serialization.serialized_objects import BaseSerialization
val = BaseSerialization.serialize(self.__dict__, strict=True)
return json.dumps(val)
@classmethod
def from_json(cls, json_str: str):
from airflow.serialization.serialized_objects import BaseSerialization
val = json.loads(json_str)
return cls(**BaseSerialization.deserialize(val))
class DagCallbackRequest(CallbackRequest):
"""
    A class with information about the success/failure DAG callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
:param run_id: Run ID for the DagRun
:param processor_subdir: Directory used by Dag Processor when parsed the dag.
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging
"""
def __init__(
self,
full_filepath: str,
dag_id: str,
run_id: str,
processor_subdir: str | None,
is_failure_callback: bool | None = True,
msg: str | None = None,
):
super().__init__(full_filepath=full_filepath, processor_subdir=processor_subdir, msg=msg)
self.dag_id = dag_id
self.run_id = run_id
self.is_failure_callback = is_failure_callback
class SlaCallbackRequest(CallbackRequest):
"""
A class with information about the SLA callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
:param processor_subdir: Directory used by Dag Processor when parsed the dag.
"""
def __init__(
self,
full_filepath: str,
dag_id: str,
processor_subdir: str | None,
msg: str | None = None,
):
super().__init__(full_filepath, processor_subdir=processor_subdir, msg=msg)
self.dag_id = dag_id
| 5,101 | 33.707483 | 106 | py |
airflow | airflow-main/airflow/callbacks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/callbacks/base_callback_sink.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.callbacks.callback_requests import CallbackRequest
class BaseCallbackSink:
"""Base class for Callbacks Sinks."""
def send(self, callback: CallbackRequest) -> None:
"""Sends callback for execution."""
raise NotImplementedError()
| 1,091 | 36.655172 | 63 | py |
airflow | airflow-main/airflow/callbacks/database_callback_sink.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy.orm import Session
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.models.db_callback_request import DbCallbackRequest
from airflow.utils.session import NEW_SESSION, provide_session
class DatabaseCallbackSink(BaseCallbackSink):
"""Sends callbacks to database."""
@provide_session
def send(self, callback: CallbackRequest, session: Session = NEW_SESSION) -> None:
"""Sends callback for execution."""
db_callback = DbCallbackRequest(callback=callback, priority_weight=10)
session.add(db_callback)
| 1,469 | 39.833333 | 86 | py |
airflow | airflow-main/airflow/timetables/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, NamedTuple, Sequence
from warnings import warn
from pendulum import DateTime
from airflow.typing_compat import Protocol, runtime_checkable
if TYPE_CHECKING:
from airflow.utils.types import DagRunType
class DataInterval(NamedTuple):
"""A data interval for a DagRun to operate over.
Both ``start`` and ``end`` **MUST** be "aware", i.e. contain timezone
information.
"""
start: DateTime
end: DateTime
@classmethod
def exact(cls, at: DateTime) -> DataInterval:
"""Represent an "interval" containing only an exact time."""
return cls(start=at, end=at)
class TimeRestriction(NamedTuple):
"""Restriction on when a DAG can be scheduled for a run.
Specifically, the run must not be earlier than ``earliest``, nor later than
``latest``. If ``catchup`` is *False*, the run must also not be earlier than
the current time, i.e. "missed" schedules are not backfilled.
These values are generally set on the DAG or task's ``start_date``,
``end_date``, and ``catchup`` arguments.
Both ``earliest`` and ``latest``, if not *None*, are inclusive; a DAG run
can happen exactly at either point of time. They are guaranteed to be aware
(i.e. contain timezone information) for ``TimeRestriction`` instances
created by Airflow.
"""
earliest: DateTime | None
latest: DateTime | None
catchup: bool
class DagRunInfo(NamedTuple):
"""Information to schedule a DagRun.
Instances of this will be returned by timetables when they are asked to
schedule a DagRun creation.
"""
run_after: DateTime
"""The earliest time this DagRun is created and its tasks scheduled.
This **MUST** be "aware", i.e. contain timezone information.
"""
data_interval: DataInterval
"""The data interval this DagRun to operate over."""
@classmethod
def exact(cls, at: DateTime) -> DagRunInfo:
"""Represent a run on an exact time."""
return cls(run_after=at, data_interval=DataInterval.exact(at))
@classmethod
def interval(cls, start: DateTime, end: DateTime) -> DagRunInfo:
"""Represent a run on a continuous schedule.
In such a schedule, each data interval starts right after the previous
one ends, and each run is scheduled right after the interval ends. This
applies to all schedules prior to AIP-39 except ``@once`` and ``None``.
"""
return cls(run_after=end, data_interval=DataInterval(start, end))
@property
def logical_date(self: DagRunInfo) -> DateTime:
"""Infer the logical date to represent a DagRun.
This replaces ``execution_date`` in Airflow 2.1 and prior. The idea is
essentially the same, just a different name.
"""
return self.data_interval.start
@runtime_checkable
class Timetable(Protocol):
"""Protocol that all Timetable classes are expected to implement."""
description: str = ""
"""Human-readable description of the timetable.
For example, this can produce something like ``'At 21:30, only on Friday'``
from the cron expression ``'30 21 * * 5'``. This is used in the webserver UI.
"""
periodic: bool = True
"""Whether this timetable runs periodically.
This defaults to and should generally be *True*, but some special setups
like ``schedule=None`` and ``"@once"`` set it to *False*.
"""
_can_be_scheduled: bool = True
    @property
    def can_be_scheduled(self):
        """Whether this timetable can actually schedule runs in an automated manner.
        This defaults to and should generally be *True* (including non periodic
        execution types like *@once* and data triggered tables), but
        ``NullTimetable`` sets this to *False*.
        """
        if hasattr(self, "can_run"):
            warn(
                'can_run class variable is deprecated. Use "can_be_scheduled" instead.',
                DeprecationWarning,
                stacklevel=2,
            )
            return self.can_run
        return self._can_be_scheduled
run_ordering: Sequence[str] = ("data_interval_end", "execution_date")
"""How runs triggered from this timetable should be ordered in UI.
This should be a list of field names on the DAG run object.
"""
active_runs_limit: int | None = None
"""Override the max_active_runs parameter of any DAGs using this timetable.
This is called during DAG initializing, and will set the max_active_runs if
it returns a value. In most cases this should return None, but in some cases
(for example, the ContinuousTimetable) there are good reasons for limiting
the DAGRun parallelism.
"""
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
"""Deserialize a timetable from data.
This is called when a serialized DAG is deserialized. ``data`` will be
whatever was returned by ``serialize`` during DAG serialization. The
default implementation constructs the timetable without any arguments.
"""
return cls()
def serialize(self) -> dict[str, Any]:
"""Serialize the timetable for JSON encoding.
This is called during DAG serialization to store timetable information
in the database. This should return a JSON-serializable dict that will
be fed into ``deserialize`` when the DAG is deserialized. The default
implementation returns an empty dict.
"""
return {}
def validate(self) -> None:
"""Validate the timetable is correctly specified.
Override this method to provide run-time validation raised when a DAG
is put into a dagbag. The default implementation does nothing.
:raises: AirflowTimetableInvalid on validation failure.
"""
return
@property
def summary(self) -> str:
"""A short summary for the timetable.
This is used to display the timetable in the web UI. A cron expression
timetable, for example, can use this to display the expression. The
default implementation returns the timetable's type name.
"""
return type(self).__name__
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
"""When a DAG run is manually triggered, infer a data interval for it.
This is used for e.g. manually-triggered runs, where ``run_after`` would
be when the user triggers the run. The default implementation raises
``NotImplementedError``.
"""
raise NotImplementedError()
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
"""Provide information to schedule the next DagRun.
The default implementation raises ``NotImplementedError``.
:param last_automated_data_interval: The data interval of the associated
DAG's last scheduled or backfilled run (manual runs not considered).
:param restriction: Restriction to apply when scheduling the DAG run.
See documentation of :class:`TimeRestriction` for details.
:return: Information on when the next DagRun can be scheduled. None
        means a DagRun will not happen. This does not mean no more runs
        will ever be scheduled again for this DAG; the timetable can return
a DagRunInfo object when asked at another time.
"""
raise NotImplementedError()
def generate_run_id(
self,
*,
run_type: DagRunType,
logical_date: DateTime,
data_interval: DataInterval | None,
**extra,
) -> str:
return run_type.generate_run_id(logical_date)
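# Illustrative sketch (not part of Airflow): a minimal concrete timetable only needs to
# implement infer_manual_data_interval and next_dagrun_info. A "run every N hours after
# the start date" timetable could look roughly like this (catchup handling omitted for
# brevity):
#
#     class EveryNHoursTimetable(Timetable):
#         def __init__(self, n: int) -> None:
#             self.n = n
#
#         def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
#             return DataInterval(run_after.subtract(hours=self.n), run_after)
#
#         def next_dagrun_info(self, *, last_automated_data_interval, restriction):
#             if restriction.earliest is None:
#                 return None
#             if last_automated_data_interval is None:
#                 start = restriction.earliest
#             else:
#                 start = last_automated_data_interval.end
#             if restriction.latest is not None and start > restriction.latest:
#                 return None
#             return DagRunInfo.interval(start, start.add(hours=self.n))
#
# Real timetables should also implement serialize()/deserialize() so they survive DAG
# serialization, as documented above.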
| 8,630 | 34.9625 | 88 | py |
airflow | airflow-main/airflow/timetables/events.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import itertools
from typing import Iterable
import pendulum
from pendulum import DateTime
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
class EventsTimetable(Timetable):
"""
Timetable that schedules DAG runs at specific listed datetimes.
Suitable for predictable but truly irregular scheduling such as sporting events.
:param event_dates: List of datetimes for the DAG to run at. Duplicates will be ignored. Must be finite
and of reasonable size as it will be loaded in its entirety.
:param restrict_to_events: Whether manual runs should use the most recent event or
the current time
:param presorted: if True, event_dates will be assumed to be in ascending order. Provides modest
performance improvement for larger lists of event_dates.
    :param description: A name for the timetable to display in the UI. If None, it will be shown as
        "X Events" where X is the number of event_dates
"""
def __init__(
self,
event_dates: Iterable[DateTime],
restrict_to_events: bool = False,
presorted: bool = False,
description: str | None = None,
):
self.event_dates = list(event_dates) # Must be reversible and indexable
if not presorted:
# For long lists this could take a while, so only want to do it once
self.event_dates = sorted(self.event_dates)
self.restrict_to_events = restrict_to_events
if description is None:
self.description = (
f"{len(self.event_dates)} Events between {self.event_dates[0]} and {self.event_dates[-1]}"
)
self._summary = f"{len(self.event_dates)} Events"
else:
self._summary = description
self.description = description
@property
def summary(self) -> str:
return self._summary
def __repr__(self):
return self.summary
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if last_automated_data_interval is None:
next_event = self.event_dates[0]
else:
future_dates = itertools.dropwhile(
lambda when: when <= last_automated_data_interval.end, self.event_dates # type: ignore
)
next_event = next(future_dates, None) # type: ignore
if next_event is None:
return None
return DagRunInfo.exact(next_event)
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
# If Timetable not restricted to events, run for the time specified
if not self.restrict_to_events:
return DataInterval.exact(run_after)
# If restricted to events, run for the most recent past event
# or for the first event if all events are in the future
if run_after < self.event_dates[0]:
return DataInterval.exact(self.event_dates[0])
else:
past_events = itertools.dropwhile(lambda when: when > run_after, self.event_dates[::-1])
most_recent_event = next(past_events)
return DataInterval.exact(most_recent_event)
def serialize(self):
return {
"event_dates": [str(x) for x in self.event_dates],
"restrict_to_events": self.restrict_to_events,
}
@classmethod
def deserialize(cls, data) -> Timetable:
return cls(
[pendulum.DateTime.fromisoformat(x) for x in data["event_dates"]],
data["restrict_to_events"],
presorted=True,
)
| 4,566 | 37.70339 | 107 | py |
airflow | airflow-main/airflow/timetables/simple.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import operator
from typing import TYPE_CHECKING, Any, Collection
from pendulum import DateTime
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
if TYPE_CHECKING:
from sqlalchemy import Session
from airflow.models.dataset import DatasetEvent
from airflow.utils.types import DagRunType
class _TrivialTimetable(Timetable):
"""Some code reuse for "trivial" timetables that has nothing complex."""
periodic = False
run_ordering = ("execution_date",)
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
return cls()
def __eq__(self, other: Any) -> bool:
"""As long as *other* is of the same type.
This is only for testing purposes and should not be relied on otherwise.
"""
if not isinstance(other, type(self)):
return NotImplemented
return True
def serialize(self) -> dict[str, Any]:
return {}
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
return DataInterval.exact(run_after)
class NullTimetable(_TrivialTimetable):
"""Timetable that never schedules anything.
This corresponds to ``schedule=None``.
"""
can_be_scheduled = False
description: str = "Never, external triggers only"
@property
def summary(self) -> str:
return "None"
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
return None
class OnceTimetable(_TrivialTimetable):
"""Timetable that schedules the execution once as soon as possible.
This corresponds to ``schedule="@once"``.
"""
description: str = "Once, as soon as possible"
@property
def summary(self) -> str:
return "@once"
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if last_automated_data_interval is not None:
return None # Already run, no more scheduling.
if restriction.earliest is None: # No start date, won't run.
return None
# "@once" always schedule to the start_date determined by the DAG and
# tasks, regardless of catchup or not. This has been the case since 1.10
# and we're inheriting it. See AIRFLOW-1928.
run_after = restriction.earliest
if restriction.latest is not None and run_after > restriction.latest:
return None
return DagRunInfo.exact(run_after)
class ContinuousTimetable(_TrivialTimetable):
"""Timetable that schedules continually, while still respecting start_date and end_date.
This corresponds to ``schedule="@continuous"``.
"""
description: str = "As frequently as possible, but only one run at a time."
active_runs_limit = 1 # Continuous DAGRuns should be constrained to one run at a time
@property
def summary(self) -> str:
return "@continuous"
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if restriction.earliest is None: # No start date, won't run.
return None
if last_automated_data_interval is not None: # has already run once
start = last_automated_data_interval.end
end = DateTime.utcnow()
else: # first run
start = restriction.earliest
end = max(restriction.earliest, DateTime.utcnow()) # won't run any earlier than start_date
if restriction.latest is not None and end > restriction.latest:
return None
return DagRunInfo.interval(start, end)
class DatasetTriggeredTimetable(_TrivialTimetable):
"""Timetable that never schedules anything.
This should not be directly used anywhere, but only set if a DAG is triggered by datasets.
:meta private:
"""
description: str = "Triggered by datasets"
@property
def summary(self) -> str:
return "Dataset"
def generate_run_id(
self,
*,
run_type: DagRunType,
logical_date: DateTime,
data_interval: DataInterval | None,
session: Session | None = None,
events: Collection[DatasetEvent] | None = None,
**extra,
) -> str:
from airflow.models.dagrun import DagRun
return DagRun.generate_run_id(run_type, logical_date)
def data_interval_for_events(
self,
logical_date: DateTime,
events: Collection[DatasetEvent],
) -> DataInterval:
if not events:
return DataInterval(logical_date, logical_date)
start = min(
events, key=operator.attrgetter("source_dag_run.data_interval_start")
).source_dag_run.data_interval_start
end = max(
events, key=operator.attrgetter("source_dag_run.data_interval_end")
).source_dag_run.data_interval_end
return DataInterval(start, end)
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
return None
| 6,179 | 30.055276 | 103 | py |
airflow | airflow-main/airflow/timetables/trigger.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
from typing import Any
from dateutil.relativedelta import relativedelta
from pendulum import DateTime
from pendulum.tz.timezone import Timezone
from airflow.timetables._cron import CronMixin
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
class CronTriggerTimetable(CronMixin, Timetable):
"""Timetable that triggers DAG runs according to a cron expression.
This is different from ``CronDataIntervalTimetable``, where the cron
expression specifies the *data interval* of a DAG run. With this timetable,
the data intervals are specified independently from the cron expression.
Also for the same reason, this timetable kicks off a DAG run immediately at
the start of the period (similar to POSIX cron), instead of needing to wait
for one data interval to pass.
Don't pass ``@once`` in here; use ``OnceTimetable`` instead.
"""
def __init__(
self,
cron: str,
*,
timezone: str | Timezone,
interval: datetime.timedelta | relativedelta = datetime.timedelta(),
) -> None:
super().__init__(cron, timezone)
self._interval = interval
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_relativedelta, decode_timezone
interval: datetime.timedelta | relativedelta
if isinstance(data["interval"], dict):
interval = decode_relativedelta(data["interval"])
else:
interval = datetime.timedelta(seconds=data["interval"])
return cls(data["expression"], timezone=decode_timezone(data["timezone"]), interval=interval)
def serialize(self) -> dict[str, Any]:
from airflow.serialization.serialized_objects import encode_relativedelta, encode_timezone
interval: float | dict[str, Any]
if isinstance(self._interval, datetime.timedelta):
interval = self._interval.total_seconds()
else:
interval = encode_relativedelta(self._interval)
timezone = encode_timezone(self._timezone)
return {"expression": self._expression, "timezone": timezone, "interval": interval}
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
return DataInterval(run_after - self._interval, run_after)
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if restriction.catchup:
if last_automated_data_interval is not None:
next_start_time = self._get_next(last_automated_data_interval.end)
elif restriction.earliest is None:
return None # Don't know where to catch up from, give up.
else:
next_start_time = self._align_to_next(restriction.earliest)
else:
start_time_candidates = [self._align_to_next(DateTime.utcnow())]
if last_automated_data_interval is not None:
start_time_candidates.append(self._get_next(last_automated_data_interval.end))
if restriction.earliest is not None:
start_time_candidates.append(self._align_to_next(restriction.earliest))
next_start_time = max(start_time_candidates)
if restriction.latest is not None and restriction.latest < next_start_time:
return None
return DagRunInfo.interval(next_start_time - self._interval, next_start_time)
| 4,393 | 42.50495 | 101 | py |
airflow | airflow-main/airflow/timetables/_cron.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
from functools import cached_property
from typing import Any
from cron_descriptor import CasingTypeEnum, ExpressionDescriptor, FormatException, MissingFieldException
from croniter import CroniterBadCronError, CroniterBadDateError, croniter
from pendulum import DateTime
from pendulum.tz.timezone import Timezone
from airflow.exceptions import AirflowTimetableInvalid
from airflow.utils.dates import cron_presets
from airflow.utils.timezone import convert_to_utc, make_aware, make_naive
def _is_schedule_fixed(expression: str) -> bool:
"""Figures out if the schedule has a fixed time (e.g. 3 AM every day).
:return: True if the schedule has a fixed time, False if not.
Detection is done by "peeking" the next two cron trigger time; if the
two times have the same minute and hour value, the schedule is fixed,
and we *don't* need to perform the DST fix.
This assumes DST happens on whole minute changes (e.g. 12:59 -> 12:00).
"""
cron = croniter(expression)
next_a = cron.get_next(datetime.datetime)
next_b = cron.get_next(datetime.datetime)
return next_b.minute == next_a.minute and next_b.hour == next_a.hour
class CronMixin:
"""Mixin to provide interface to work with croniter."""
def __init__(self, cron: str, timezone: str | Timezone) -> None:
self._expression = cron_presets.get(cron, cron)
if isinstance(timezone, str):
timezone = Timezone(timezone)
self._timezone = timezone
descriptor = ExpressionDescriptor(
expression=self._expression, casing_type=CasingTypeEnum.Sentence, use_24hour_time_format=True
)
try:
# checking for more than 5 parameters in Cron and avoiding evaluation for now,
# as Croniter has inconsistent evaluation with other libraries
if len(croniter(self._expression).expanded) > 5:
raise FormatException()
interval_description = descriptor.get_description()
except (CroniterBadCronError, FormatException, MissingFieldException):
interval_description = ""
self.description = interval_description
def __eq__(self, other: Any) -> bool:
"""Both expression and timezone should match.
This is only for testing purposes and should not be relied on otherwise.
"""
if not isinstance(other, type(self)):
return NotImplemented
return self._expression == other._expression and self._timezone == other._timezone
@property
def summary(self) -> str:
return self._expression
def validate(self) -> None:
try:
croniter(self._expression)
except (CroniterBadCronError, CroniterBadDateError) as e:
raise AirflowTimetableInvalid(str(e))
@cached_property
def _should_fix_dst(self) -> bool:
# This is lazy so instantiating a schedule does not immediately raise
# an exception. Validity is checked with validate() during DAG-bagging.
return not _is_schedule_fixed(self._expression)
def _get_next(self, current: DateTime) -> DateTime:
"""Get the first schedule after specified time, with DST fixed."""
naive = make_naive(current, self._timezone)
cron = croniter(self._expression, start_time=naive)
scheduled = cron.get_next(datetime.datetime)
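        # Fixed-time schedules (e.g. daily at 03:00) keep the localized wall-clock result so
        # the run stays at that local time across DST changes; interval-style schedules
        # instead advance by the absolute delta so their cadence is unaffected by DST.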
if not self._should_fix_dst:
return convert_to_utc(make_aware(scheduled, self._timezone))
delta = scheduled - naive
return convert_to_utc(current.in_timezone(self._timezone) + delta)
def _get_prev(self, current: DateTime) -> DateTime:
"""Get the first schedule before specified time, with DST fixed."""
naive = make_naive(current, self._timezone)
cron = croniter(self._expression, start_time=naive)
scheduled = cron.get_prev(datetime.datetime)
if not self._should_fix_dst:
return convert_to_utc(make_aware(scheduled, self._timezone))
delta = naive - scheduled
return convert_to_utc(current.in_timezone(self._timezone) - delta)
def _align_to_next(self, current: DateTime) -> DateTime:
"""Get the next scheduled time.
This is ``current + interval``, unless ``current`` falls right on the
interval boundary, when ``current`` is returned.
"""
next_time = self._get_next(current)
if self._get_prev(next_time) != current:
return next_time
return current
def _align_to_prev(self, current: DateTime) -> DateTime:
"""Get the prev scheduled time.
This is ``current - interval``, unless ``current`` falls right on the
interval boundary, when ``current`` is returned.
"""
prev_time = self._get_prev(current)
if self._get_next(prev_time) != current:
return prev_time
return current
| 5,724 | 40.18705 | 105 | py |
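A minimal, self-contained sketch of the fixed-schedule check above: it peeks at the next two trigger times of a cron expression, mirroring ``_is_schedule_fixed``. The expressions are illustrative only and croniter is assumed to be installed.

import datetime

from croniter import croniter


def is_schedule_fixed(expression: str) -> bool:
    # Peek the next two trigger times; if hour and minute match, the schedule
    # fires at a fixed wall-clock time and needs no DST adjustment.
    cron = croniter(expression)
    next_a = cron.get_next(datetime.datetime)
    next_b = cron.get_next(datetime.datetime)
    return next_b.minute == next_a.minute and next_b.hour == next_a.hour


if __name__ == "__main__":
    print(is_schedule_fixed("0 3 * * *"))     # True: always fires at 03:00
    print(is_schedule_fixed("*/30 * * * *"))  # False: hour/minute drift between triggers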
airflow | airflow-main/airflow/timetables/interval.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
from typing import Any, Union
from dateutil.relativedelta import relativedelta
from pendulum import DateTime
from airflow.exceptions import AirflowTimetableInvalid
from airflow.timetables._cron import CronMixin
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
from airflow.utils.timezone import convert_to_utc
Delta = Union[datetime.timedelta, relativedelta]
class _DataIntervalTimetable(Timetable):
"""Basis for timetable implementations that schedule data intervals.
This kind of timetable classes create periodic data intervals from an
underlying schedule representation (e.g. a cron expression, or a timedelta
instance), and schedule a DagRun at the end of each interval.
"""
def _skip_to_latest(self, earliest: DateTime | None) -> DateTime:
"""Bound the earliest time a run can be scheduled.
This is called when ``catchup=False``. See docstring of subclasses for
exact skipping behaviour of a schedule.
"""
raise NotImplementedError()
def _align_to_next(self, current: DateTime) -> DateTime:
"""Align given time to the next scheduled time.
        For fixed schedules (e.g. every midnight), this finds the next time that
aligns to the declared time, if the given time does not align. If the
schedule is not fixed (e.g. every hour), the given time is returned.
"""
raise NotImplementedError()
def _align_to_prev(self, current: DateTime) -> DateTime:
"""Align given time to the previous scheduled time.
        For fixed schedules (e.g. every midnight), this finds the previous time that
aligns to the declared time, if the given time does not align. If the
schedule is not fixed (e.g. every hour), the given time is returned.
It is not enough to use ``_get_prev(_align_to_next())``, since when a
DAG's schedule changes, this alternative would make the first scheduling
after the schedule change remain the same.
"""
raise NotImplementedError()
def _get_next(self, current: DateTime) -> DateTime:
"""Get the first schedule after the current time."""
raise NotImplementedError()
def _get_prev(self, current: DateTime) -> DateTime:
"""Get the last schedule before the current time."""
raise NotImplementedError()
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
earliest = restriction.earliest
if not restriction.catchup:
earliest = self._skip_to_latest(earliest)
elif earliest is not None:
earliest = self._align_to_next(earliest)
if last_automated_data_interval is None:
# First run; schedule the run at the first available time matching
# the schedule, and retrospectively create a data interval for it.
if earliest is None:
return None
start = earliest
else: # There's a previous run.
# Alignment is needed when DAG has new schedule interval.
align_last_data_interval_end = self._align_to_prev(last_automated_data_interval.end)
if earliest is not None:
# Catchup is False or DAG has new start date in the future.
# Make sure we get the later one.
start = max(align_last_data_interval_end, earliest)
else:
# Data interval starts from the end of the previous interval.
start = align_last_data_interval_end
if restriction.latest is not None and start > restriction.latest:
return None
end = self._get_next(start)
return DagRunInfo.interval(start=start, end=end)
class CronDataIntervalTimetable(CronMixin, _DataIntervalTimetable):
"""Timetable that schedules data intervals with a cron expression.
This corresponds to ``schedule=<cron>``, where ``<cron>`` is either
a five/six-segment representation, or one of ``cron_presets``.
    The implementation builds on croniter to add timezone awareness. This is
because croniter works only with naive timestamps, and cannot consider DST
when determining the next/previous time.
Don't pass ``@once`` in here; use ``OnceTimetable`` instead.
"""
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_timezone
return cls(data["expression"], decode_timezone(data["timezone"]))
def serialize(self) -> dict[str, Any]:
from airflow.serialization.serialized_objects import encode_timezone
return {"expression": self._expression, "timezone": encode_timezone(self._timezone)}
def _skip_to_latest(self, earliest: DateTime | None) -> DateTime:
"""Bound the earliest time a run can be scheduled.
The logic is that we move start_date up until one period before, so the
current time is AFTER the period end, and the job can be created...
This is slightly different from the delta version at terminal values.
If the next schedule should start *right now*, we want the data interval
        that starts now, not the one that ends now.
"""
current_time = DateTime.utcnow()
last_start = self._get_prev(current_time)
next_start = self._get_next(last_start)
if next_start == current_time: # Current time is on interval boundary.
new_start = last_start
elif next_start > current_time: # Current time is between boundaries.
new_start = self._get_prev(last_start)
else:
raise AssertionError("next schedule shouldn't be earlier")
if earliest is None:
return new_start
return max(new_start, self._align_to_next(earliest))
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
# Get the last complete period before run_after, e.g. if a DAG run is
# scheduled at each midnight, the data interval of a manually triggered
# run at 1am 25th is between 0am 24th and 0am 25th.
end = self._align_to_prev(run_after)
return DataInterval(start=self._get_prev(end), end=end)
class DeltaDataIntervalTimetable(_DataIntervalTimetable):
"""Timetable that schedules data intervals with a time delta.
This corresponds to ``schedule=<delta>``, where ``<delta>`` is
either a ``datetime.timedelta`` or ``dateutil.relativedelta.relativedelta``
instance.
"""
def __init__(self, delta: Delta) -> None:
self._delta = delta
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_relativedelta
delta = data["delta"]
if isinstance(delta, dict):
return cls(decode_relativedelta(delta))
return cls(datetime.timedelta(seconds=delta))
def __eq__(self, other: Any) -> bool:
"""The offset should match.
This is only for testing purposes and should not be relied on otherwise.
"""
if not isinstance(other, DeltaDataIntervalTimetable):
return NotImplemented
return self._delta == other._delta
@property
def summary(self) -> str:
return str(self._delta)
def serialize(self) -> dict[str, Any]:
from airflow.serialization.serialized_objects import encode_relativedelta
delta: Any
if isinstance(self._delta, datetime.timedelta):
delta = self._delta.total_seconds()
else:
delta = encode_relativedelta(self._delta)
return {"delta": delta}
def validate(self) -> None:
now = datetime.datetime.now()
if (now + self._delta) <= now:
raise AirflowTimetableInvalid(f"schedule interval must be positive, not {self._delta!r}")
def _get_next(self, current: DateTime) -> DateTime:
return convert_to_utc(current + self._delta)
def _get_prev(self, current: DateTime) -> DateTime:
return convert_to_utc(current - self._delta)
def _align_to_next(self, current: DateTime) -> DateTime:
return current
def _align_to_prev(self, current: DateTime) -> DateTime:
return current
def _skip_to_latest(self, earliest: DateTime | None) -> DateTime:
"""Bound the earliest time a run can be scheduled.
The logic is that we move start_date up until one period before, so the
current time is AFTER the period end, and the job can be created...
This is slightly different from the cron version at terminal values.
"""
new_start = self._get_prev(DateTime.utcnow())
if earliest is None:
return new_start
return max(new_start, earliest)
    def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
return DataInterval(start=self._get_prev(run_after), end=run_after)
| 9,932 | 40.045455 | 101 | py |
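A sketch of how the timetables above translate trigger times into data intervals, assuming an importable Airflow installation; the cron expression and dates are illustrative.

import pendulum

from airflow.timetables.base import TimeRestriction
from airflow.timetables.interval import CronDataIntervalTimetable

timetable = CronDataIntervalTimetable("0 0 * * *", timezone="UTC")

# A run triggered manually at 01:00 on Jan 2 covers the last complete period:
# midnight Jan 1 to midnight Jan 2.
manual = timetable.infer_manual_data_interval(
    run_after=pendulum.datetime(2023, 1, 2, 1, 0, tz="UTC")
)
print(manual.start, manual.end)

# The first scheduled run after a start date of Jan 1 (with catchup) covers
# Jan 1 to Jan 2 and is only created once that interval has passed.
info = timetable.next_dagrun_info(
    last_automated_data_interval=None,
    restriction=TimeRestriction(
        earliest=pendulum.datetime(2023, 1, 1, tz="UTC"),
        latest=None,
        catchup=True,
    ),
)
print(info.data_interval if info else None)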
airflow | airflow-main/airflow/timetables/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/security/kerberos.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kerberos security provider."""
import logging
import shlex
import subprocess
import sys
import time
from airflow.configuration import conf
from airflow.utils.net import get_hostname
NEED_KRB181_WORKAROUND: bool | None = None
log = logging.getLogger(__name__)
def renew_from_kt(principal: str | None, keytab: str, exit_on_fail: bool = True):
"""
Renew kerberos token from keytab.
    :param principal: principal
    :param keytab: keytab file
    :param exit_on_fail: exit the process when the renewal fails instead of returning the exit code
    :return: 0 on success; otherwise the failing command's exit code (only when ``exit_on_fail`` is False)
"""
# The config is specified in seconds. But we ask for that same amount in
# minutes to give ourselves a large renewal buffer.
renewal_lifetime = f"{conf.getint('kerberos', 'reinit_frequency')}m"
cmd_principal = principal or conf.get_mandatory_value("kerberos", "principal").replace(
"_HOST", get_hostname()
)
if conf.getboolean("kerberos", "forwardable"):
forwardable = "-f"
else:
forwardable = "-F"
if conf.getboolean("kerberos", "include_ip"):
include_ip = "-a"
else:
include_ip = "-A"
cmdv: list[str] = [
conf.get_mandatory_value("kerberos", "kinit_path"),
forwardable,
include_ip,
"-r",
renewal_lifetime,
"-k", # host ticket
"-t",
keytab, # specify keytab
"-c",
conf.get_mandatory_value("kerberos", "ccache"), # specify credentials cache
cmd_principal,
]
log.info("Re-initialising kerberos from keytab: %s", " ".join(shlex.quote(f) for f in cmdv))
with subprocess.Popen(
cmdv,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=-1,
universal_newlines=True,
) as subp:
subp.wait()
if subp.returncode != 0:
log.error(
"Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s",
subp.returncode,
"\n".join(subp.stdout.readlines() if subp.stdout else []),
"\n".join(subp.stderr.readlines() if subp.stderr else []),
)
if exit_on_fail:
sys.exit(subp.returncode)
else:
return subp.returncode
global NEED_KRB181_WORKAROUND
if NEED_KRB181_WORKAROUND is None:
NEED_KRB181_WORKAROUND = detect_conf_var()
if NEED_KRB181_WORKAROUND:
        # (From: HUE-640). Kerberos clocks have second-level granularity. Make sure
        # we renew the ticket after the initial valid time.
time.sleep(1.5)
ret = perform_krb181_workaround(cmd_principal)
if exit_on_fail and ret != 0:
sys.exit(ret)
else:
return ret
return 0
def perform_krb181_workaround(principal: str):
"""
Workaround for Kerberos 1.8.1.
:param principal: principal name
:return: None
"""
cmdv: list[str] = [
conf.get_mandatory_value("kerberos", "kinit_path"),
"-c",
conf.get_mandatory_value("kerberos", "ccache"),
"-R",
] # Renew ticket_cache
log.info("Renewing kerberos ticket to work around kerberos 1.8.1: %s", " ".join(cmdv))
ret = subprocess.call(cmdv, close_fds=True)
if ret != 0:
principal = f"{principal or conf.get('kerberos', 'principal')}/{get_hostname()}"
ccache = conf.get("kerberos", "ccache")
log.error(
"Couldn't renew kerberos ticket in order to work around Kerberos 1.8.1 issue. Please check that "
"the ticket for '%s' is still renewable:\n $ kinit -f -c %s\nIf the 'renew until' date is the "
"same as the 'valid starting' date, the ticket cannot be renewed. Please check your KDC "
"configuration, and the ticket renewal policy (maxrenewlife) for the '%s' and `krbtgt' "
"principals.",
principal,
ccache,
principal,
)
return ret
def detect_conf_var() -> bool:
"""
Autodetect the Kerberos ticket configuration.
Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it.
"""
ticket_cache = conf.get_mandatory_value("kerberos", "ccache")
with open(ticket_cache, "rb") as file:
# Note: this file is binary, so we check against a bytearray.
return b"X-CACHECONF:" in file.read()
def run(principal: str | None, keytab: str):
"""
    Run the kerberos renewer.
:param principal: principal name
:param keytab: keytab file
:return: None
"""
if not keytab:
log.warning("Keytab renewer not starting, no keytab configured")
sys.exit(0)
while True:
renew_from_kt(principal, keytab)
time.sleep(conf.getint("kerberos", "reinit_frequency"))
| 6,495 | 32.833333 | 109 | py |
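The shape of the ``kinit`` invocation that ``renew_from_kt`` assembles, sketched with made-up placeholder values rather than values read from a real ``airflow.cfg``.

import shlex

# Placeholders standing in for [kerberos] configuration options.
reinit_frequency = 3600  # configured in seconds; reused as a "minutes" renewal lifetime for buffer
principal = "airflow/host.example.com@EXAMPLE.COM"
keytab = "/etc/security/keytabs/airflow.keytab"
ccache = "/tmp/airflow_krb5_ccache"

cmdv = [
    "kinit",
    "-f",                          # forwardable ("-F" when disabled)
    "-a",                          # include IP ("-A" when disabled)
    "-r", f"{reinit_frequency}m",  # renewal lifetime
    "-k",                          # obtain the ticket from a keytab
    "-t", keytab,
    "-c", ccache,                  # credentials cache
    principal,
]
print(" ".join(shlex.quote(part) for part in cmdv))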
airflow | airflow-main/airflow/security/permissions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
# Resource Constants
RESOURCE_ACTION = "Permissions"
RESOURCE_ADMIN_MENU = "Admin"
RESOURCE_AIRFLOW = "Airflow"
RESOURCE_AUDIT_LOG = "Audit Logs"
RESOURCE_BROWSE_MENU = "Browse"
RESOURCE_CONFIG = "Configurations"
RESOURCE_CONNECTION = "Connections"
RESOURCE_DAG = "DAGs"
RESOURCE_DAG_CODE = "DAG Code"
RESOURCE_DAG_DEPENDENCIES = "DAG Dependencies"
RESOURCE_DAG_PREFIX = "DAG:"
RESOURCE_DAG_RUN = "DAG Runs"
RESOURCE_DAG_WARNING = "DAG Warnings"
RESOURCE_CLUSTER_ACTIVITY = "Cluster Activity"
RESOURCE_DATASET = "Datasets"
RESOURCE_DOCS = "Documentation"
RESOURCE_DOCS_MENU = "Docs"
RESOURCE_IMPORT_ERROR = "ImportError"
RESOURCE_JOB = "Jobs"
RESOURCE_LOGIN = "Logins"
RESOURCE_MY_PASSWORD = "My Password"
RESOURCE_MY_PROFILE = "My Profile"
RESOURCE_PASSWORD = "Passwords"
RESOURCE_PERMISSION = "Permission Views" # Refers to a Perm <-> View mapping, not an MVC View.
RESOURCE_PLUGIN = "Plugins"
RESOURCE_POOL = "Pools"
RESOURCE_PROVIDER = "Providers"
RESOURCE_RESOURCE = "View Menus"
RESOURCE_ROLE = "Roles"
RESOURCE_SLA_MISS = "SLA Misses"
RESOURCE_TASK_INSTANCE = "Task Instances"
RESOURCE_TASK_LOG = "Task Logs"
RESOURCE_TASK_RESCHEDULE = "Task Reschedules"
RESOURCE_TRIGGER = "Triggers"
RESOURCE_USER = "Users"
RESOURCE_USER_STATS_CHART = "User Stats Chart"
RESOURCE_VARIABLE = "Variables"
RESOURCE_WEBSITE = "Website"
RESOURCE_XCOM = "XComs"
# Action Constants
ACTION_CAN_CREATE = "can_create"
ACTION_CAN_READ = "can_read"
ACTION_CAN_EDIT = "can_edit"
ACTION_CAN_DELETE = "can_delete"
ACTION_CAN_ACCESS_MENU = "menu_access"
DEPRECATED_ACTION_CAN_DAG_READ = "can_dag_read"
DEPRECATED_ACTION_CAN_DAG_EDIT = "can_dag_edit"
DAG_ACTIONS = {ACTION_CAN_READ, ACTION_CAN_EDIT, ACTION_CAN_DELETE}
def resource_name_for_dag(root_dag_id: str) -> str:
"""Returns the resource name for a DAG id.
Note that since a sub-DAG should follow the permission of its
parent DAG, you should pass ``DagModel.root_dag_id`` to this function,
for a subdag. A normal dag should pass the ``DagModel.dag_id``.
"""
if root_dag_id == RESOURCE_DAG:
return root_dag_id
if root_dag_id.startswith(RESOURCE_DAG_PREFIX):
return root_dag_id
return f"{RESOURCE_DAG_PREFIX}{root_dag_id}"
| 3,028 | 35.059524 | 95 | py |
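A short usage sketch for ``resource_name_for_dag``; the DAG ids are made up and an importable Airflow installation is assumed.

from airflow.security.permissions import RESOURCE_DAG, resource_name_for_dag

print(resource_name_for_dag("example_dag"))      # DAG:example_dag
print(resource_name_for_dag("DAG:example_dag"))  # already prefixed, returned unchanged
print(resource_name_for_dag(RESOURCE_DAG))       # DAGs (the catch-all DAG resource)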
airflow | airflow-main/airflow/security/utils.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Various security-related utils."""
import socket
import re2
from airflow.utils.net import get_hostname
def get_components(principal) -> list[str] | None:
"""Split the kerberos principal string into parts.
:return: *None* if the principal is empty. Otherwise split the value into
parts. Assuming the principal string is valid, the return value should
contain three components: short name, instance (FQDN), and realm.
"""
if not principal:
return None
return re2.split(r"[/@]", str(principal))
def replace_hostname_pattern(components, host=None):
"""Replaces hostname with the right pattern including lowercase of the name."""
fqdn = host
if not fqdn or fqdn == "0.0.0.0":
fqdn = get_hostname()
return f"{components[0]}/{fqdn.lower()}@{components[2]}"
def get_fqdn(hostname_or_ip=None):
"""Retrieves FQDN - hostname for the IP or hostname."""
try:
if hostname_or_ip:
fqdn = socket.gethostbyaddr(hostname_or_ip)[0]
if fqdn == "localhost":
fqdn = get_hostname()
else:
fqdn = get_hostname()
except OSError:
fqdn = hostname_or_ip
return fqdn
def principal_from_username(username, realm):
"""Retrieves principal from the user name and realm."""
if ("@" not in username) and realm:
username = f"{username}@{realm}"
return username
| 3,033 | 34.694118 | 83 | py |
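Usage sketch for the principal helpers above; the principals, hosts and realm are made-up values and an importable Airflow installation is assumed.

from airflow.security.utils import (
    get_components,
    principal_from_username,
    replace_hostname_pattern,
)

print(get_components("airflow/host.example.com@EXAMPLE.COM"))
# ['airflow', 'host.example.com', 'EXAMPLE.COM']

print(replace_hostname_pattern(["airflow", "old-host", "EXAMPLE.COM"], host="NEW-HOST.example.com"))
# airflow/new-host.example.com@EXAMPLE.COM (hostname is lower-cased)

print(principal_from_username("alice", "EXAMPLE.COM"))              # alice@EXAMPLE.COM
print(principal_from_username("alice@EXAMPLE.COM", "EXAMPLE.COM"))  # unchanged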
airflow | airflow-main/airflow/security/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/secrets/metastore.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to sourcing connections from metastore database."""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING
from sqlalchemy import select
from sqlalchemy.orm import Session
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.secrets import BaseSecretsBackend
from airflow.utils.session import NEW_SESSION, provide_session
if TYPE_CHECKING:
from airflow.models.connection import Connection
class MetastoreBackend(BaseSecretsBackend):
"""Retrieves Connection object and Variable from airflow metastore database."""
@provide_session
def get_connection(self, conn_id: str, session: Session = NEW_SESSION) -> Connection | None:
from airflow.models.connection import Connection
conn = session.scalar(select(Connection).where(Connection.conn_id == conn_id).limit(1))
session.expunge_all()
return conn
@provide_session
def get_connections(self, conn_id: str, session: Session = NEW_SESSION) -> list[Connection]:
warnings.warn(
"This method is deprecated. Please use "
"`airflow.secrets.metastore.MetastoreBackend.get_connection`.",
RemovedInAirflow3Warning,
stacklevel=3,
)
conn = self.get_connection(conn_id=conn_id, session=session)
if conn:
return [conn]
return []
@provide_session
def get_variable(self, key: str, session: Session = NEW_SESSION) -> str | None:
"""
Get Airflow Variable from Metadata DB.
:param key: Variable Key
:return: Variable Value
"""
from airflow.models.variable import Variable
var_value = session.scalar(select(Variable).where(Variable.key == key).limit(1))
session.expunge_all()
if var_value:
return var_value.val
return None
| 2,668 | 35.067568 | 96 | py |
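A sketch of reading from the metastore backend; it assumes an initialized Airflow metadata database that already contains the (made-up) connection id and variable key used here.

from airflow.secrets.metastore import MetastoreBackend

backend = MetastoreBackend()

conn = backend.get_connection("my_postgres")  # Connection | None
if conn is not None:
    print(conn.conn_type, conn.host)

print(backend.get_variable("environment"))    # str | None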
airflow | airflow-main/airflow/secrets/base_secrets.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from abc import ABC
from typing import TYPE_CHECKING
from airflow.exceptions import RemovedInAirflow3Warning
if TYPE_CHECKING:
from airflow.models.connection import Connection
class BaseSecretsBackend(ABC):
"""Abstract base class to retrieve Connection object given a conn_id or Variable given a key."""
@staticmethod
def build_path(path_prefix: str, secret_id: str, sep: str = "/") -> str:
"""
Given conn_id, build path for Secrets Backend.
:param path_prefix: Prefix of the path to get secret
:param secret_id: Secret id
:param sep: separator used to concatenate connections_prefix and conn_id. Default: "/"
"""
return f"{path_prefix}{sep}{secret_id}"
def get_conn_value(self, conn_id: str) -> str | None:
"""
Retrieve from Secrets Backend a string value representing the Connection object.
If the client your secrets backend uses already returns a python dict, you should override
``get_connection`` instead.
:param conn_id: connection id
"""
raise NotImplementedError
def deserialize_connection(self, conn_id: str, value: str) -> Connection:
"""
Given a serialized representation of the airflow Connection, return an instance.
Looks at first character to determine how to deserialize.
:param conn_id: connection id
:param value: the serialized representation of the Connection object
:return: the deserialized Connection
"""
from airflow.models.connection import Connection
value = value.strip()
if value[0] == "{":
return Connection.from_json(conn_id=conn_id, value=value)
else:
return Connection(conn_id=conn_id, uri=value)
def get_conn_uri(self, conn_id: str) -> str | None:
"""
Get conn_uri from Secrets Backend.
This method is deprecated and will be removed in a future release; implement ``get_conn_value``
instead.
:param conn_id: connection id
"""
raise NotImplementedError()
def get_connection(self, conn_id: str) -> Connection | None:
"""
Return connection object with a given ``conn_id``.
        Tries ``get_conn_value`` first and, if it is not implemented, falls back to ``get_conn_uri``.
:param conn_id: connection id
"""
value = None
not_implemented_get_conn_value = False
# TODO: after removal of ``get_conn_uri`` we should not catch NotImplementedError here
try:
value = self.get_conn_value(conn_id=conn_id)
except NotImplementedError:
not_implemented_get_conn_value = True
warnings.warn(
"Method `get_conn_uri` is deprecated. Please use `get_conn_value`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
if not_implemented_get_conn_value:
try:
value = self.get_conn_uri(conn_id=conn_id)
except NotImplementedError:
raise NotImplementedError(
f"Secrets backend {self.__class__.__name__} neither implements "
"`get_conn_value` nor `get_conn_uri`. Method `get_conn_uri` is "
"deprecated and will be removed in a future release. Please implement `get_conn_value`."
)
if value:
return self.deserialize_connection(conn_id=conn_id, value=value)
else:
return None
def get_connections(self, conn_id: str) -> list[Connection]:
"""
Return connection object with a given ``conn_id``.
:param conn_id: connection id
"""
warnings.warn(
"This method is deprecated. Please use "
"`airflow.secrets.base_secrets.BaseSecretsBackend.get_connection`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
conn = self.get_connection(conn_id=conn_id)
if conn:
return [conn]
return []
def get_variable(self, key: str) -> str | None:
"""
Return value for Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
raise NotImplementedError()
def get_config(self, key: str) -> str | None:
"""
Return value for Airflow Config Key.
:param key: Config Key
:return: Config Value
"""
return None
| 5,333 | 33.636364 | 108 | py |
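A minimal sketch of a custom backend built on ``BaseSecretsBackend``. The in-memory dict stands in for a real secrets service; it shows that ``get_conn_value`` may return either a URI string or a JSON document, both of which ``deserialize_connection`` handles.

from __future__ import annotations

from airflow.secrets.base_secrets import BaseSecretsBackend


class InMemorySecretsBackend(BaseSecretsBackend):
    """Toy backend serving secrets from a dict (illustrative only)."""

    _connections = {
        "pg_uri": "postgresql://login:secret@db.example.com:5432/analytics",
        "pg_json": '{"conn_type": "postgres", "host": "db.example.com", "login": "login"}',
    }
    _variables = {"environment": "staging"}

    def get_conn_value(self, conn_id: str) -> str | None:
        return self._connections.get(conn_id)

    def get_variable(self, key: str) -> str | None:
        return self._variables.get(key)


backend = InMemorySecretsBackend()
print(backend.get_connection("pg_uri").host)   # db.example.com (parsed from the URI)
print(backend.get_connection("pg_json").host)  # db.example.com (parsed from JSON)
print(backend.get_variable("environment"))     # staging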
airflow | airflow-main/airflow/secrets/local_filesystem.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to retrieving connections and variables from local file."""
from __future__ import annotations
import json
import logging
import os
import warnings
from collections import defaultdict
from inspect import signature
from json import JSONDecodeError
from typing import TYPE_CHECKING, Any
from airflow.exceptions import (
AirflowException,
AirflowFileParseException,
ConnectionNotUnique,
FileSyntaxError,
RemovedInAirflow3Warning,
)
from airflow.secrets.base_secrets import BaseSecretsBackend
from airflow.utils import yaml
from airflow.utils.file import COMMENT_PATTERN
from airflow.utils.log.logging_mixin import LoggingMixin
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from airflow.models.connection import Connection
def get_connection_parameter_names() -> set[str]:
"""Returns :class:`airflow.models.connection.Connection` constructor parameters."""
from airflow.models.connection import Connection
return {k for k in signature(Connection.__init__).parameters.keys() if k != "self"}
def _parse_env_file(file_path: str) -> tuple[dict[str, list[str]], list[FileSyntaxError]]:
"""
Parse a file in the ``.env`` format.
.. code-block:: text
MY_CONN_ID=my-conn-type://my-login:my-pa%2Fssword@my-host:5432/my-schema?param1=val1¶m2=val2
:param file_path: The location of the file that will be processed.
:return: Tuple with mapping of key and list of values and list of syntax errors
"""
with open(file_path) as f:
content = f.read()
secrets: dict[str, list[str]] = defaultdict(list)
errors: list[FileSyntaxError] = []
for line_no, line in enumerate(content.splitlines(), 1):
if not line:
# Ignore empty line
continue
if COMMENT_PATTERN.match(line):
# Ignore comments
continue
key, sep, value = line.partition("=")
if not sep:
errors.append(
FileSyntaxError(
line_no=line_no,
message='Invalid line format. The line should contain at least one equal sign ("=").',
)
)
continue
if not value:
errors.append(
FileSyntaxError(
line_no=line_no,
message="Invalid line format. Key is empty.",
)
)
secrets[key].append(value)
return secrets, errors
def _parse_yaml_file(file_path: str) -> tuple[dict[str, list[str]], list[FileSyntaxError]]:
"""
Parse a file in the YAML format.
:param file_path: The location of the file that will be processed.
:return: Tuple with mapping of key and list of values and list of syntax errors
"""
with open(file_path) as f:
content = f.read()
if not content:
return {}, [FileSyntaxError(line_no=1, message="The file is empty.")]
try:
secrets = yaml.safe_load(content)
except yaml.MarkedYAMLError as e:
err_line_no = e.problem_mark.line if e.problem_mark else -1
return {}, [FileSyntaxError(line_no=err_line_no, message=str(e))]
if not isinstance(secrets, dict):
return {}, [FileSyntaxError(line_no=1, message="The file should contain the object.")]
return secrets, []
def _parse_json_file(file_path: str) -> tuple[dict[str, Any], list[FileSyntaxError]]:
"""
Parse a file in the JSON format.
:param file_path: The location of the file that will be processed.
:return: Tuple with mapping of key and list of values and list of syntax errors
"""
with open(file_path) as f:
content = f.read()
if not content:
return {}, [FileSyntaxError(line_no=1, message="The file is empty.")]
try:
secrets = json.loads(content)
except JSONDecodeError as e:
return {}, [FileSyntaxError(line_no=int(e.lineno), message=e.msg)]
if not isinstance(secrets, dict):
return {}, [FileSyntaxError(line_no=1, message="The file should contain the object.")]
return secrets, []
FILE_PARSERS = {
"env": _parse_env_file,
"json": _parse_json_file,
"yaml": _parse_yaml_file,
"yml": _parse_yaml_file,
}
def _parse_secret_file(file_path: str) -> dict[str, Any]:
"""
Based on the file extension format, selects a parser, and parses the file.
:param file_path: The location of the file that will be processed.
:return: Map of secret key (e.g. connection ID) and value.
"""
if not os.path.exists(file_path):
raise AirflowException(
f"File {file_path} was not found. Check the configuration of your Secrets backend."
)
log.debug("Parsing file: %s", file_path)
ext = file_path.rsplit(".", 2)[-1].lower()
if ext not in FILE_PARSERS:
raise AirflowException(
"Unsupported file format. The file must have one of the following extensions: "
".env .json .yaml .yml"
)
secrets, parse_errors = FILE_PARSERS[ext](file_path)
log.debug("Parsed file: len(parse_errors)=%d, len(secrets)=%d", len(parse_errors), len(secrets))
if parse_errors:
raise AirflowFileParseException(
"Failed to load the secret file.", file_path=file_path, parse_errors=parse_errors
)
return secrets
def _create_connection(conn_id: str, value: Any):
"""Creates a connection based on a URL or JSON object."""
from airflow.models.connection import Connection
if isinstance(value, str):
return Connection(conn_id=conn_id, uri=value)
if isinstance(value, dict):
connection_parameter_names = get_connection_parameter_names() | {"extra_dejson"}
current_keys = set(value.keys())
if not current_keys.issubset(connection_parameter_names):
illegal_keys = current_keys - connection_parameter_names
illegal_keys_list = ", ".join(illegal_keys)
raise AirflowException(
f"The object have illegal keys: {illegal_keys_list}. "
f"The dictionary can only contain the following keys: {connection_parameter_names}"
)
if "extra" in value and "extra_dejson" in value:
raise AirflowException(
"The extra and extra_dejson parameters are mutually exclusive. "
"Please provide only one parameter."
)
if "extra_dejson" in value:
value["extra"] = json.dumps(value["extra_dejson"])
del value["extra_dejson"]
if "conn_id" in current_keys and conn_id != value["conn_id"]:
raise AirflowException(
f"Mismatch conn_id. "
f"The dictionary key has the value: {value['conn_id']}. "
f"The item has the value: {conn_id}."
)
value["conn_id"] = conn_id
return Connection(**value)
raise AirflowException(
f"Unexpected value type: {type(value)}. The connection can only be defined using a string or object."
)
def load_variables(file_path: str) -> dict[str, str]:
"""
Load variables from a text file.
    ``JSON``, ``YAML`` and ``.env`` files are supported.
:param file_path: The location of the file that will be processed.
"""
log.debug("Loading variables from a text file")
secrets = _parse_secret_file(file_path)
invalid_keys = [key for key, values in secrets.items() if isinstance(values, list) and len(values) != 1]
if invalid_keys:
raise AirflowException(f'The "{file_path}" file contains multiple values for keys: {invalid_keys}')
variables = {key: values[0] if isinstance(values, list) else values for key, values in secrets.items()}
log.debug("Loaded %d variables: ", len(variables))
return variables
def load_connections(file_path) -> dict[str, list[Any]]:
"""Deprecated: Please use `airflow.secrets.local_filesystem.load_connections_dict`."""
warnings.warn(
"This function is deprecated. Please use `airflow.secrets.local_filesystem.load_connections_dict`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
    return {k: [v] for k, v in load_connections_dict(file_path).items()}
def load_connections_dict(file_path: str) -> dict[str, Any]:
"""
Load connection from text file.
    ``JSON``, ``YAML`` and ``.env`` files are supported.
:return: A dictionary where the key contains a connection ID and the value contains the connection.
"""
log.debug("Loading connection")
secrets: dict[str, Any] = _parse_secret_file(file_path)
connection_by_conn_id = {}
for key, secret_values in list(secrets.items()):
if isinstance(secret_values, list):
if len(secret_values) > 1:
raise ConnectionNotUnique(f"Found multiple values for {key} in {file_path}.")
for secret_value in secret_values:
connection_by_conn_id[key] = _create_connection(key, secret_value)
else:
connection_by_conn_id[key] = _create_connection(key, secret_values)
num_conn = len(connection_by_conn_id)
log.debug("Loaded %d connections", num_conn)
return connection_by_conn_id
class LocalFilesystemBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection objects and Variables from local files.
    ``JSON``, ``YAML`` and ``.env`` files are supported.
:param variables_file_path: File location with variables data.
:param connections_file_path: File location with connection data.
"""
def __init__(self, variables_file_path: str | None = None, connections_file_path: str | None = None):
super().__init__()
self.variables_file = variables_file_path
self.connections_file = connections_file_path
@property
def _local_variables(self) -> dict[str, str]:
if not self.variables_file:
self.log.debug("The file for variables is not specified. Skipping")
# The user may not specify any file.
return {}
secrets = load_variables(self.variables_file)
return secrets
@property
def _local_connections(self) -> dict[str, Connection]:
if not self.connections_file:
self.log.debug("The file for connection is not specified. Skipping")
# The user may not specify any file.
return {}
return load_connections_dict(self.connections_file)
def get_connection(self, conn_id: str) -> Connection | None:
if conn_id in self._local_connections:
return self._local_connections[conn_id]
return None
def get_connections(self, conn_id: str) -> list[Any]:
warnings.warn(
"This method is deprecated. Please use "
"`airflow.secrets.local_filesystem.LocalFilesystemBackend.get_connection`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
conn = self.get_connection(conn_id=conn_id)
if conn:
return [conn]
return []
def get_variable(self, key: str) -> str | None:
return self._local_variables.get(key)
| 11,913 | 34.670659 | 109 | py |
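A sketch of the file formats accepted by the loaders above, using temporary files with made-up contents. The trailing comment points at the ``[secrets] backend`` option, which is how Airflow itself is normally told to use a backend like this.

import json
import tempfile
from pathlib import Path

from airflow.secrets.local_filesystem import LocalFilesystemBackend, load_connections_dict

tmp = Path(tempfile.mkdtemp())

# .env style: one CONN_ID=<URI or JSON> pair per line.
env_file = tmp / "connections.env"
env_file.write_text("MY_PG=postgresql://login:secret@db.example.com:5432/analytics\n")

# JSON style: conn_id mapped to a URI string or a Connection-shaped object.
json_file = tmp / "connections.json"
json_file.write_text(json.dumps({"MY_PG": {"conn_type": "postgres", "host": "db.example.com"}}))

print(load_connections_dict(str(env_file))["MY_PG"].host)   # db.example.com
print(load_connections_dict(str(json_file))["MY_PG"].host)  # db.example.com

backend = LocalFilesystemBackend(connections_file_path=str(env_file))
print(backend.get_connection("MY_PG").port)                 # 5432

# To have Airflow use this backend globally, point the [secrets] backend option
# (e.g. the AIRFLOW__SECRETS__BACKEND environment variable) at
# airflow.secrets.local_filesystem.LocalFilesystemBackend and pass the file paths
# via backend_kwargs.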
airflow | airflow-main/airflow/secrets/environment_variables.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to sourcing connections from environment variables."""
from __future__ import annotations
import os
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.secrets import BaseSecretsBackend
CONN_ENV_PREFIX = "AIRFLOW_CONN_"
VAR_ENV_PREFIX = "AIRFLOW_VAR_"
class EnvironmentVariablesBackend(BaseSecretsBackend):
"""Retrieves Connection object and Variable from environment variable."""
def get_conn_uri(self, conn_id: str) -> str | None:
"""
Return URI representation of Connection conn_id.
:param conn_id: the connection id
        :return: the connection URI value from the environment, if present
"""
warnings.warn(
"This method is deprecated. Please use "
"`airflow.secrets.environment_variables.EnvironmentVariablesBackend.get_conn_value`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_conn_value(conn_id)
def get_conn_value(self, conn_id: str) -> str | None:
return os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable from Environment Variable.
:param key: Variable Key
:return: Variable Value
"""
return os.environ.get(VAR_ENV_PREFIX + key.upper())
| 2,129 | 33.918033 | 98 | py |
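A sketch of the ``AIRFLOW_CONN_<CONN_ID>`` / ``AIRFLOW_VAR_<KEY>`` naming convention consumed by the backend above; the values are made up.

import os

from airflow.secrets.environment_variables import EnvironmentVariablesBackend

os.environ["AIRFLOW_CONN_MY_PG"] = "postgresql://login:secret@db.example.com:5432/analytics"
os.environ["AIRFLOW_VAR_ENVIRONMENT"] = "staging"

backend = EnvironmentVariablesBackend()
print(backend.get_connection("my_pg").host)  # db.example.com (conn id is upper-cased for lookup)
print(backend.get_variable("environment"))   # staging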
airflow | airflow-main/airflow/secrets/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Secrets framework provides means of getting connection objects from various sources.
The following sources are available:
* Environment variables
* Metastore database
* Local Filesystem Secrets Backend
"""
from __future__ import annotations
__all__ = ["BaseSecretsBackend", "DEFAULT_SECRETS_SEARCH_PATH"]
from airflow.secrets.base_secrets import BaseSecretsBackend
DEFAULT_SECRETS_SEARCH_PATH = [
"airflow.secrets.environment_variables.EnvironmentVariablesBackend",
"airflow.secrets.metastore.MetastoreBackend",
]
| 1,318 | 34.648649 | 84 | py |
airflow | airflow-main/airflow/www/auth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import wraps
from typing import Callable, Sequence, TypeVar, cast
from flask import current_app, flash, g, redirect, render_template, request, url_for
from airflow.configuration import auth_manager, conf
from airflow.utils.net import get_hostname
T = TypeVar("T", bound=Callable)
def has_access(permissions: Sequence[tuple[str, str]] | None = None) -> Callable[[T], T]:
"""Factory for decorator that checks current user's permissions against required permissions."""
def requires_access_decorator(func: T):
@wraps(func)
def decorated(*args, **kwargs):
__tracebackhide__ = True # Hide from pytest traceback.
appbuilder = current_app.appbuilder
dag_id = (
kwargs.get("dag_id")
or request.args.get("dag_id")
or request.form.get("dag_id")
or (request.is_json and request.json.get("dag_id"))
or None
)
if appbuilder.sm.check_authorization(permissions, dag_id):
return func(*args, **kwargs)
elif auth_manager.is_logged_in() and not g.user.perms:
return (
render_template(
"airflow/no_roles_permissions.html",
hostname=get_hostname()
if conf.getboolean("webserver", "EXPOSE_HOSTNAME")
else "redact",
logout_url=appbuilder.get_url_for_logout,
),
403,
)
else:
access_denied = "Access is Denied"
flash(access_denied, "danger")
return redirect(
url_for(
appbuilder.sm.auth_view.__class__.__name__ + ".login",
next=request.url,
)
)
return cast(T, decorated)
return requires_access_decorator
| 2,782 | 37.123288 | 100 | py |
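A sketch of how the ``has_access`` decorator above is typically applied to a Flask-AppBuilder view method, pairing actions with resources from ``airflow.security.permissions``; the view class, route and template are made up.

from flask_appbuilder import BaseView, expose

from airflow.security import permissions
from airflow.www import auth


class MyPluginView(BaseView):
    """Hypothetical plugin view guarded by Airflow permission checks."""

    default_view = "index"

    @expose("/myplugin")
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        ]
    )
    def index(self):
        return self.render_template("myplugin/index.html")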
airflow | airflow-main/airflow/www/views.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections
import copy
import datetime
import itertools
import json
import logging
import math
import sys
import traceback
import warnings
from bisect import insort_left
from collections import defaultdict
from functools import cached_property, wraps
from json import JSONDecodeError
from typing import Any, Callable, Collection, Iterator, Mapping, MutableMapping, Sequence
from urllib.parse import unquote, urljoin, urlsplit
import configupdater
import flask.json
import lazy_object_proxy
import nvd3
import re2
import sqlalchemy as sqla
from croniter import croniter
from flask import (
Response,
abort,
before_render_template,
flash,
g,
has_request_context,
make_response,
redirect,
render_template,
request,
send_from_directory,
session as flask_session,
url_for,
)
from flask_appbuilder import BaseView, ModelView, expose
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_appbuilder.security.decorators import has_access
from flask_appbuilder.urltools import get_order_args, get_page_args, get_page_size_args
from flask_appbuilder.widgets import FormWidget
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps, pformat # type: ignore
from markupsafe import Markup, escape
from pendulum.datetime import DateTime
from pendulum.parsing.exceptions import ParserError
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import Date, and_, case, desc, func, inspect, or_, select, union_all
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session, joinedload
from wtforms import SelectField, validators
import airflow
from airflow import models, plugins_manager, settings
from airflow.api.common.airflow_health import get_airflow_health
from airflow.api.common.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_queued,
set_dag_run_state_to_success,
set_state,
)
from airflow.configuration import AIRFLOW_CONFIG, auth_manager, conf
from airflow.datasets import Dataset
from airflow.exceptions import (
AirflowConfigException,
AirflowException,
AirflowNotFoundException,
ParamValidationError,
RemovedInAirflow3Warning,
)
from airflow.executors.executor_loader import ExecutorLoader
from airflow.hooks.base import BaseHook
from airflow.jobs.job import Job
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.jobs.triggerer_job_runner import TriggererJobRunner
from airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, Trigger, XCom, errors
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.dag import DAG, get_dataset_triggered_next_run_info
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import RUN_ID_REGEX, DagRun, DagRunType
from airflow.models.dataset import DagScheduleDatasetReference, DatasetDagRunQueue, DatasetEvent, DatasetModel
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstance, TaskInstanceNote
from airflow.providers_manager import ProvidersManager
from airflow.security import permissions
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS
from airflow.timetables._cron import CronMixin
from airflow.timetables.base import DataInterval, TimeRestriction
from airflow.utils import json as utils_json, timezone, yaml
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.dag_edges import dag_edges
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.docs import get_doc_url_for_provider, get_docs_url
from airflow.utils.helpers import alchemy_to_dict, exactly_one
from airflow.utils.log import secrets_masker
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.net import get_hostname
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.strings import to_boolean
from airflow.utils.task_group import MappedTaskGroup, TaskGroup, task_group_to_dict
from airflow.utils.timezone import td_format, utcnow
from airflow.version import version
from airflow.www import auth, utils as wwwutils
from airflow.www.decorators import action_logging, gzipped
from airflow.www.forms import (
DagRunEditForm,
DateTimeForm,
DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
TaskInstanceEditForm,
create_connection_form_class,
)
from airflow.www.widgets import AirflowModelListWidget, AirflowVariableShowWidget
PAGE_SIZE = conf.getint("webserver", "page_size")
FILTER_TAGS_COOKIE = "tags_filter"
FILTER_STATUS_COOKIE = "dag_status_filter"
LINECHART_X_AXIS_TICKFORMAT = (
"function (d, i) { let xLabel;"
"if (i === undefined) {xLabel = d3.time.format('%H:%M, %d %b %Y')(new Date(parseInt(d)));"
"} else {xLabel = d3.time.format('%H:%M, %d %b')(new Date(parseInt(d)));} return xLabel;}"
)
SENSITIVE_FIELD_PLACEHOLDER = "RATHER_LONG_SENSITIVE_FIELD_PLACEHOLDER"
def sanitize_args(args: dict[str, str]) -> dict[str, str]:
"""
Remove all parameters starting with `_`.
:param args: arguments of request
:return: copy of the dictionary passed as input with args starting with `_` removed.
"""
return {key: value for key, value in args.items() if not key.startswith("_")}
# Following the release of https://github.com/python/cpython/issues/102153 in Python 3.8.17 and 3.9.17 on
# June 6, 2023, we are adding extra sanitization of the urls passed to get_safe_url method to make it works
# the same way regardless if the user uses latest Python patchlevel versions or not. This also follows
# a recommended solution by the Python core team.
#
# From: https://github.com/python/cpython/commit/d28bafa2d3e424b6fdcfd7ae7cde8e71d7177369
#
# We recommend that users of these APIs where the values may be used anywhere
# with security implications code defensively. Do some verification within your
# code before trusting a returned component part. Does that ``scheme`` make
# sense? Is that a sensible ``path``? Is there anything strange about that
# ``hostname``? etc.
#
# C0 control and space to be stripped per WHATWG spec.
# == "".join([chr(i) for i in range(0, 0x20 + 1)])
_WHATWG_C0_CONTROL_OR_SPACE = (
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c"
"\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f "
)
def get_safe_url(url):
"""Given a user-supplied URL, ensure it points to our web server."""
if not url:
return url_for("Airflow.index")
# If the url contains semicolon, redirect it to homepage to avoid
# potential XSS. (Similar to https://github.com/python/cpython/pull/24297/files (bpo-42967))
if ";" in unquote(url):
return url_for("Airflow.index")
url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
host_url = urlsplit(request.host_url)
redirect_url = urlsplit(urljoin(request.host_url, url))
if not (redirect_url.scheme in ("http", "https") and host_url.netloc == redirect_url.netloc):
return url_for("Airflow.index")
# This will ensure we only redirect to the right scheme/netloc
return redirect_url.geturl()
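# Illustrative behaviour of get_safe_url() (comments only, not executed): for a
# request served from https://airflow.example.com --
#   "/home"                    -> "https://airflow.example.com/home" (same host, allowed)
#   "https://evil.example/x"   -> url_for("Airflow.index")  (foreign netloc)
#   "javascript:alert(1)"      -> url_for("Airflow.index")  (scheme is not http/https)
#   "/home;jsessionid=abc"     -> url_for("Airflow.index")  (semicolon rejected)
#   None or ""                 -> url_for("Airflow.index")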
def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
"""Get Execution Data, Base Date & Number of runs from a Request."""
date_time = www_request.args.get("execution_date")
run_id = www_request.args.get("run_id")
# First check run id, then check execution date, if not fall back on the latest dagrun
if run_id:
dagrun = dag.get_dagrun(run_id=run_id, session=session)
date_time = dagrun.execution_date
elif date_time:
date_time = _safe_parse_datetime(date_time)
else:
date_time = dag.get_latest_execution_date(session=session) or timezone.utcnow()
base_date = www_request.args.get("base_date")
if base_date:
base_date = _safe_parse_datetime(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
# the first dag run. Round to next second.
base_date = (date_time + datetime.timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint("webserver", "default_dag_run_display_number")
num_runs = www_request.args.get("num_runs", default=default_dag_run, type=int)
# When base_date has been rounded up because of the DateTimeField widget, we want
# to use the execution_date as the starting point for our query just to ensure a
# link targeting a specific dag run actually loads that dag run. If there are
# more than num_runs dag runs in the "rounded period" then those dagruns would get
# loaded and the actual requested run would be excluded by the limit(). Once
# the user has changed base date to be anything else we want to use that instead.
query_date = base_date
if date_time < base_date <= date_time + datetime.timedelta(seconds=1):
query_date = date_time
drs = session.scalars(
select(DagRun)
.where(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= query_date)
.order_by(desc(DagRun.execution_date))
.limit(num_runs)
).all()
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if date_time == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
date_time = dr.execution_date
dr_state = dr.state
return {
"dttm": date_time,
"base_date": base_date,
"num_runs": num_runs,
"execution_date": date_time.isoformat(),
"dr_choices": dr_choices,
"dr_state": dr_state,
}
def _safe_parse_datetime(v, allow_empty=False) -> datetime.datetime | None:
"""
Parse datetime and return error message for invalid dates.
:param v: the string value to be parsed
:param allow_empty: Set True to return none if empty str or None
"""
if allow_empty is True and not v:
return None
try:
return timezone.parse(v)
except (TypeError, ParserError):
abort(400, f"Invalid datetime: {v!r}")
def node_dict(node_id, label, node_class):
return {
"id": node_id,
"value": {"label": label, "rx": 5, "ry": 5, "class": node_class},
}
def dag_to_grid(dag: DagModel, dag_runs: Sequence[DagRun], session: Session):
"""
Create a nested dict representation of the DAG's TaskGroup and its children,
used to construct the Graph and Grid views.
"""
query = session.execute(
select(
TaskInstance.task_id,
TaskInstance.run_id,
TaskInstance.state,
TaskInstance._try_number,
func.min(TaskInstanceNote.content).label("note"),
func.count(func.coalesce(TaskInstance.state, sqla.literal("no_status"))).label("state_count"),
func.min(TaskInstance.start_date).label("start_date"),
func.max(TaskInstance.end_date).label("end_date"),
)
.join(TaskInstance.task_instance_note, isouter=True)
.where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id.in_([dag_run.run_id for dag_run in dag_runs]),
)
.group_by(TaskInstance.task_id, TaskInstance.run_id, TaskInstance.state, TaskInstance._try_number)
.order_by(TaskInstance.task_id, TaskInstance.run_id)
)
grouped_tis = {task_id: list(tis) for task_id, tis in itertools.groupby(query, key=lambda ti: ti.task_id)}
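# NOTE: itertools.groupby only merges *consecutive* rows, so the ORDER BY task_id in the
# query above is what guarantees each task_id collects all of its rows into one list here.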
sort_order = conf.get("webserver", "grid_view_sorting_order", fallback="topological")
if sort_order == "topological":
sort_children_fn = lambda task_group: task_group.topological_sort()
elif sort_order == "hierarchical_alphabetical":
sort_children_fn = lambda task_group: task_group.hierarchical_alphabetical_sort()
else:
raise AirflowConfigException(f"Unsupported grid_view_sorting_order: {sort_order}")
def task_group_to_grid(item, grouped_tis, *, is_parent_mapped: bool):
if not isinstance(item, TaskGroup):
def _get_summary(task_instance):
return {
"task_id": task_instance.task_id,
"run_id": task_instance.run_id,
"state": task_instance.state,
"start_date": task_instance.start_date,
"end_date": task_instance.end_date,
"try_number": wwwutils.get_try_count(task_instance._try_number, task_instance.state),
"note": task_instance.note,
}
def _mapped_summary(ti_summaries):
run_id = None
record = None
def set_overall_state(record):
for state in wwwutils.priority:
if state in record["mapped_states"]:
record["state"] = state
break
if None in record["mapped_states"]:
# When turning the dict into JSON we can't have None as a key,
# so use the string that the UI does.
record["mapped_states"]["no_status"] = record["mapped_states"].pop(None)
for ti_summary in ti_summaries:
if run_id != ti_summary.run_id:
run_id = ti_summary.run_id
if record:
set_overall_state(record)
yield record
record = {
"task_id": ti_summary.task_id,
"run_id": run_id,
"start_date": ti_summary.start_date,
"end_date": ti_summary.end_date,
"mapped_states": {ti_summary.state: ti_summary.state_count},
"state": None, # We change this before yielding
}
continue
record["start_date"] = min(
filter(None, [record["start_date"], ti_summary.start_date]), default=None
)
record["end_date"] = max(
filter(None, [record["end_date"], ti_summary.end_date]), default=None
)
record["mapped_states"][ti_summary.state] = ti_summary.state_count
if record:
set_overall_state(record)
yield record
if isinstance(item, MappedOperator) or is_parent_mapped:
instances = list(_mapped_summary(grouped_tis.get(item.task_id, [])))
else:
instances = list(map(_get_summary, grouped_tis.get(item.task_id, [])))
setup_teardown_type = {}
if item.is_setup is True:
setup_teardown_type["setupTeardownType"] = "setup"
elif item.is_teardown is True:
setup_teardown_type["setupTeardownType"] = "teardown"
return {
"id": item.task_id,
"instances": instances,
"label": item.label,
"extra_links": item.extra_links,
"is_mapped": isinstance(item, MappedOperator) or is_parent_mapped,
"has_outlet_datasets": any(isinstance(i, Dataset) for i in (item.outlets or [])),
"operator": item.operator_name,
"trigger_rule": item.trigger_rule,
**setup_teardown_type,
}
# Task Group
task_group = item
group_is_mapped = isinstance(task_group, MappedTaskGroup)
children = [
task_group_to_grid(child, grouped_tis, is_parent_mapped=group_is_mapped)
for child in sort_children_fn(task_group)
]
def get_summary(dag_run: DagRun):
child_instances = [
item
for sublist in (child["instances"] for child in children if "instances" in child)
for item in sublist
if item["run_id"] == dag_run.run_id
if item
]
children_start_dates = (item["start_date"] for item in child_instances)
children_end_dates = (item["end_date"] for item in child_instances)
children_states = {item["state"] for item in child_instances}
group_state = next((state for state in wwwutils.priority if state in children_states), None)
group_start_date = min(filter(None, children_start_dates), default=None)
group_end_date = max(filter(None, children_end_dates), default=None)
return {
"task_id": task_group.group_id,
"run_id": dag_run.run_id,
"state": group_state,
"start_date": group_start_date,
"end_date": group_end_date,
}
def get_mapped_group_summaries():
mapped_ti_query = session.execute(
select(TaskInstance.task_id, TaskInstance.state, TaskInstance.run_id, TaskInstance.map_index)
.where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id.in_(child["id"] for child in children),
TaskInstance.run_id.in_(r.run_id for r in dag_runs),
)
.order_by(TaskInstance.task_id, TaskInstance.run_id)
)
# Group tis by run_id, and then map_index.
mapped_tis: Mapping[str, Mapping[int, list[TaskInstance]]] = collections.defaultdict(
lambda: collections.defaultdict(list),
)
for ti in mapped_ti_query:
mapped_tis[ti.run_id][ti.map_index].append(ti)
def get_mapped_group_summary(run_id: str, mapped_instances: Mapping[int, list[TaskInstance]]):
child_instances = [
item
for sublist in (child["instances"] for child in children if "instances" in child)
for item in sublist
if item and item["run_id"] == run_id
]
children_start_dates = (item["start_date"] for item in child_instances)
children_end_dates = (item["end_date"] for item in child_instances)
children_states = {item["state"] for item in child_instances}
# TODO: This assumes TI map index has a one-to-one mapping to
# its parent mapped task group, which will not be true when we
# allow nested mapping in the future.
mapped_states: MutableMapping[str, int] = collections.defaultdict(int)
for mis in mapped_instances.values():
child_states = {mi.state for mi in mis}
state = next(s for s in wwwutils.priority if s in child_states)
value = state.value if state is not None else "no_status"
mapped_states[value] += 1
group_state = next((state for state in wwwutils.priority if state in children_states), None)
group_start_date = min(filter(None, children_start_dates), default=None)
group_end_date = max(filter(None, children_end_dates), default=None)
return {
"task_id": task_group.group_id,
"run_id": run_id,
"state": group_state,
"start_date": group_start_date,
"end_date": group_end_date,
"mapped_states": mapped_states,
}
return [get_mapped_group_summary(run_id, tis) for run_id, tis in mapped_tis.items()]
# We don't need to calculate summaries for the root
if task_group.group_id is None:
return {
"id": task_group.group_id,
"label": task_group.label,
"children": children,
"instances": [],
}
if group_is_mapped:
mapped_group_summaries = get_mapped_group_summaries()
return {
"id": task_group.group_id,
"label": task_group.label,
"children": children,
"tooltip": task_group.tooltip,
"instances": mapped_group_summaries,
"is_mapped": group_is_mapped,
}
group_summaries = [get_summary(dr) for dr in dag_runs]
return {
"id": task_group.group_id,
"label": task_group.label,
"children": children,
"tooltip": task_group.tooltip,
"instances": group_summaries,
}
return task_group_to_grid(dag.task_group, grouped_tis, is_parent_mapped=False)
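# dag_to_grid() ultimately returns the root group as a nested dict roughly shaped like
# (illustrative, trimmed): {"id": None, "label": ..., "instances": [],
# "children": [{"id": "some_task", "instances": [...], ...}, ...]}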
def get_key_paths(input_dict):
"""Return a list of dot-separated dictionary paths."""
for key, value in input_dict.items():
if isinstance(value, dict):
for sub_key in get_key_paths(value):
yield ".".join((key, sub_key))
else:
yield key
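# For illustration (hypothetical input): get_key_paths({"env": {"region": "eu"}, "retries": 3})
# yields "env.region" and "retries".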
def get_value_from_path(key_path, content):
"""Return the value from a dictionary based on dot-separated path of keys."""
elem = content
for x in key_path.strip(".").split("."):
try:
x = int(x)
elem = elem[x]
except ValueError:
elem = elem.get(x)
return elem
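# A minimal illustration (hypothetical inputs):
#   get_value_from_path("env.region", {"env": {"region": "eu"}})  -> "eu"
#   get_value_from_path("items.0", {"items": [10, 20]})           -> 10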
def get_task_stats_from_query(qry):
"""
Return a dict of the task quantity, grouped by dag id and task status.
:param qry: The data in the format (<dag id>, <task state>, <is dag running>, <task count>),
ordered by <dag id> and <is dag running>
"""
data = {}
last_dag_id = None
has_running_dags = False
for dag_id, state, is_dag_running, count in qry:
if last_dag_id != dag_id:
last_dag_id = dag_id
has_running_dags = False
elif not is_dag_running and has_running_dags:
continue
if is_dag_running:
has_running_dags = True
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
return data
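# Sketch of the expected shape (hypothetical rows, running rows ordered first per dag):
#   [("dag_a", "success", True, 3), ("dag_a", "failed", False, 1)] -> {"dag_a": {"success": 3}}
# The completed-run row is skipped because a running run was already seen for dag_a.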
def redirect_or_json(origin, msg, status="", status_code=200):
"""
Some endpoints are called by JavaScript;
returning JSON lets us handle side-effects in-page more elegantly.
"""
if request.headers.get("Accept") == "application/json":
if status == "error" and status_code == 200:
status_code = 500
return Response(response=msg, status=status_code, mimetype="application/json")
else:
if status:
flash(msg, status)
else:
flash(msg)
return redirect(origin)
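# Typical usage mirrors the call sites further down in this module, e.g.
#   redirect_or_json(origin, msg=str(ex), status="error", status_code=500)
# which returns JSON to XHR callers and flashes + redirects for regular browser requests.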
######################################################################################
# Error handlers
######################################################################################
def not_found(error):
"""Show Not Found on screen for any error in the Webserver."""
return (
render_template(
"airflow/error.html",
hostname=get_hostname() if conf.getboolean("webserver", "EXPOSE_HOSTNAME") else "redact",
status_code=404,
error_message="Page cannot be found.",
),
404,
)
def method_not_allowed(error):
"""Show Method Not Allowed on screen for any error in the Webserver."""
return (
render_template(
"airflow/error.html",
hostname=get_hostname() if conf.getboolean("webserver", "EXPOSE_HOSTNAME") else "redact",
status_code=405,
error_message="Received an invalid request.",
),
405,
)
def show_traceback(error):
"""Show Traceback for a given error."""
return (
render_template(
"airflow/traceback.html",
python_version=sys.version.split(" ")[0] if auth_manager.is_logged_in() else "redact",
airflow_version=version if auth_manager.is_logged_in() else "redact",
hostname=get_hostname()
if conf.getboolean("webserver", "EXPOSE_HOSTNAME") and auth_manager.is_logged_in()
else "redact",
info=traceback.format_exc()
if conf.getboolean("webserver", "EXPOSE_STACKTRACE") and auth_manager.is_logged_in()
else "Error! Please contact server admin.",
),
500,
)
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView):
"""Base View to set Airflow related properties."""
from airflow import macros
route_base = ""
extra_args = {
# Make our macros available to our UI templates too.
"macros": macros,
"get_docs_url": get_docs_url,
}
if not conf.getboolean("core", "unit_test_mode"):
executor, _ = ExecutorLoader.import_default_executor_cls()
extra_args["sqlite_warning"] = settings.engine.dialect.name == "sqlite"
if not executor.is_production:
extra_args["production_executor_warning"] = executor.__name__
extra_args["otel_on"] = conf.getboolean("metrics", "otel_on")
line_chart_attr = {
"legend.maxKeyLength": 200,
}
def render_template(self, *args, **kwargs):
# Add triggerer_job only if we need it
if TriggererJobRunner.is_needed():
kwargs["triggerer_job"] = lazy_object_proxy.Proxy(TriggererJobRunner.most_recent_job)
return super().render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(SchedulerJobRunner.most_recent_job),
**kwargs,
)
class Airflow(AirflowBaseView):
"""Main Airflow application."""
@expose("/health")
def health(self):
"""
Health-check endpoint for the Airflow instance,
covering the metadatabase, scheduler and triggerer.
"""
airflow_health_status = get_airflow_health()
return flask.json.jsonify(airflow_health_status)
@expose("/home")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
]
)
def index(self):
"""Home view."""
from airflow.models.dag import DagOwnerAttributes
hide_paused_dags_by_default = conf.getboolean("webserver", "hide_paused_dags_by_default")
default_dag_run = conf.getint("webserver", "default_dag_run_display_number")
num_runs = request.args.get("num_runs", default=default_dag_run, type=int)
current_page = request.args.get("page", default=0, type=int)
arg_search_query = request.args.get("search")
arg_tags_filter = request.args.getlist("tags")
arg_status_filter = request.args.get("status")
arg_sorting_key = request.args.get("sorting_key", "dag_id")
arg_sorting_direction = request.args.get("sorting_direction", default="asc")
if request.args.get("reset_tags") is not None:
flask_session[FILTER_TAGS_COOKIE] = None
# Remove the reset_tags=reset from the URL
return redirect(url_for("Airflow.index"))
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ",".join(arg_tags_filter)
elif cookie_val:
# If tags exist in cookie, but not URL, add them to the URL
return redirect(url_for("Airflow.index", tags=cookie_val.split(",")))
if arg_status_filter is None:
cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
if cookie_val:
arg_status_filter = cookie_val
else:
arg_status_filter = "active" if hide_paused_dags_by_default else "all"
flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
else:
status = arg_status_filter.strip().lower()
flask_session[FILTER_STATUS_COOKIE] = status
arg_status_filter = status
dags_per_page = PAGE_SIZE
start = current_page * dags_per_page
end = start + dags_per_page
# Get all the dag id the user could access
filter_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
with create_session() as session:
# read orm_dags from the db
dags_query = select(DagModel).where(~DagModel.is_subdag, DagModel.is_active)
if arg_search_query:
escaped_arg_search_query = arg_search_query.replace("_", r"\_")
dags_query = dags_query.where(
DagModel.dag_id.ilike("%" + escaped_arg_search_query + "%", escape="\\")
| DagModel.owners.ilike("%" + escaped_arg_search_query + "%", escape="\\")
)
if arg_tags_filter:
dags_query = dags_query.where(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
dags_query = dags_query.where(DagModel.dag_id.in_(filter_dag_ids))
filtered_dag_count = session.scalar(select(func.count()).select_from(dags_query))
if filtered_dag_count == 0 and len(arg_tags_filter):
flash(
"No matching DAG tags found.",
"warning",
)
flask_session[FILTER_TAGS_COOKIE] = None
return redirect(url_for("Airflow.index"))
all_dags = dags_query
active_dags = dags_query.where(~DagModel.is_paused)
paused_dags = dags_query.where(DagModel.is_paused)
# find DAGs which have a RUNNING DagRun
running_dags = dags_query.join(DagRun, DagModel.dag_id == DagRun.dag_id).where(
DagRun.state == DagRunState.RUNNING
)
# find DAGs for which the latest DagRun is FAILED
subq_all = (
select(DagRun.dag_id, func.max(DagRun.start_date).label("start_date"))
.group_by(DagRun.dag_id)
.subquery()
)
subq_failed = (
select(DagRun.dag_id, func.max(DagRun.start_date).label("start_date"))
.where(DagRun.state == DagRunState.FAILED)
.group_by(DagRun.dag_id)
.subquery()
)
subq_join = (
select(subq_all.c.dag_id, subq_all.c.start_date)
.join(
subq_failed,
and_(
subq_all.c.dag_id == subq_failed.c.dag_id,
subq_all.c.start_date == subq_failed.c.start_date,
),
)
.subquery()
)
failed_dags = dags_query.join(subq_join, DagModel.dag_id == subq_join.c.dag_id)
is_paused_count = dict(
session.execute(
select(DagModel.is_paused, func.count(DagModel.dag_id)).group_by(DagModel.is_paused)
).all()
)
status_count_active = is_paused_count.get(False, 0)
status_count_paused = is_paused_count.get(True, 0)
status_count_running = session.scalar(select(func.count()).select_from(running_dags))
status_count_failed = session.scalar(select(func.count()).select_from(failed_dags))
all_dags_count = status_count_active + status_count_paused
if arg_status_filter == "active":
current_dags = active_dags
num_of_all_dags = status_count_active
elif arg_status_filter == "paused":
current_dags = paused_dags
num_of_all_dags = status_count_paused
elif arg_status_filter == "running":
current_dags = running_dags
num_of_all_dags = status_count_running
elif arg_status_filter == "failed":
current_dags = failed_dags
num_of_all_dags = status_count_failed
else:
current_dags = all_dags
num_of_all_dags = all_dags_count
if arg_sorting_key == "last_dagrun":
dag_run_subquery = (
select(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label("max_execution_date"),
)
.group_by(DagRun.dag_id)
.subquery()
)
current_dags = current_dags.outerjoin(
dag_run_subquery, and_(dag_run_subquery.c.dag_id == DagModel.dag_id)
)
null_case = case((dag_run_subquery.c.max_execution_date.is_(None), 1), else_=0)
if arg_sorting_direction == "desc":
current_dags = current_dags.order_by(
null_case, dag_run_subquery.c.max_execution_date.desc()
)
else:
current_dags = current_dags.order_by(null_case, dag_run_subquery.c.max_execution_date)
else:
sort_column = DagModel.__table__.c.get(arg_sorting_key)
if sort_column is not None:
null_case = case((sort_column.is_(None), 1), else_=0)
if arg_sorting_direction == "desc":
current_dags = current_dags.order_by(null_case, sort_column.desc())
else:
current_dags = current_dags.order_by(null_case, sort_column)
dags = (
session.scalars(
current_dags.options(joinedload(DagModel.tags)).offset(start).limit(dags_per_page)
)
.unique()
.all()
)
user_permissions = g.user.perms
can_create_dag_run = (
permissions.ACTION_CAN_CREATE,
permissions.RESOURCE_DAG_RUN,
) in user_permissions
dataset_triggered_dag_ids = {dag.dag_id for dag in dags if dag.schedule_interval == "Dataset"}
if dataset_triggered_dag_ids:
dataset_triggered_next_run_info = get_dataset_triggered_next_run_info(
dataset_triggered_dag_ids, session=session
)
else:
dataset_triggered_next_run_info = {}
for dag in dags:
dag.can_edit = get_airflow_app().appbuilder.sm.can_edit_dag(dag.dag_id, g.user)
dag.can_trigger = dag.can_edit and can_create_dag_run
dag.can_delete = get_airflow_app().appbuilder.sm.can_delete_dag(dag.dag_id, g.user)
dagtags = session.execute(select(func.distinct(DagTag.name)).order_by(DagTag.name)).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
owner_links_dict = DagOwnerAttributes.get_all(session)
import_errors = select(errors.ImportError).order_by(errors.ImportError.id)
if (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG) not in user_permissions:
# if the user doesn't have access to all DAGs, only display errors from visible DAGs
import_errors = import_errors.join(
DagModel, DagModel.fileloc == errors.ImportError.filename
).where(DagModel.dag_id.in_(filter_dag_ids))
import_errors = session.scalars(import_errors)
for import_error in import_errors:
flash(
f"Broken DAG: [{import_error.filename}] {import_error.stacktrace}",
"dag_import_error",
)
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
f"Broken plugin: [{filename}] {stacktrace}",
"error",
)
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
state_color_mapping = State.state_color.copy()
state_color_mapping["null"] = state_color_mapping.pop(None)
page_title = conf.get(section="webserver", key="instance_name", fallback="DAGs")
page_title_has_markup = conf.getboolean(
section="webserver", key="instance_name_has_markup", fallback=False
)
dashboard_alerts = [
fm for fm in settings.DASHBOARD_UIALERTS if fm.should_show(get_airflow_app().appbuilder.sm)
]
def _iter_parsed_moved_data_table_names():
for table_name in inspect(session.get_bind()).get_table_names():
segments = table_name.split("__", 3)
if len(segments) < 3:
continue
if segments[0] != settings.AIRFLOW_MOVED_TABLE_PREFIX:
continue
# Second segment is a version marker that we don't need to show.
yield segments[-1], table_name
if (
permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_ADMIN_MENU,
) in user_permissions and conf.getboolean("webserver", "warn_deployment_exposure"):
robots_file_access_count = (
select(Log)
.where(Log.event == "robots")
.where(Log.dttm > (utcnow() - datetime.timedelta(days=7)))
)
robots_file_access_count = session.scalar(
select(func.count()).select_from(robots_file_access_count)
)
if robots_file_access_count > 0:
flash(
Markup(
"Recent requests have been made to /robots.txt. "
"This indicates that this deployment may be accessible to the public internet. "
"This warning can be disabled by setting webserver.warn_deployment_exposure=False in "
"airflow.cfg. Read more about web deployment security <a href="
f'"{get_docs_url("security/webserver.html")}">'
"here</a>"
),
"warning",
)
return self.render_template(
"airflow/dags.html",
dags=dags,
dashboard_alerts=dashboard_alerts,
migration_moved_data_alerts=sorted(set(_iter_parsed_moved_data_table_names())),
current_page=current_page,
search_query=arg_search_query if arg_search_query else "",
page_title=Markup(page_title) if page_title_has_markup else page_title,
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(
current_page,
num_of_pages,
search=escape(arg_search_query) if arg_search_query else None,
status=arg_status_filter if arg_status_filter else None,
tags=arg_tags_filter if arg_tags_filter else None,
sorting_key=arg_sorting_key if arg_sorting_key else None,
sorting_direction=arg_sorting_direction if arg_sorting_direction else None,
),
num_runs=num_runs,
tags=tags,
owner_links=owner_links_dict,
state_color=state_color_mapping,
status_filter=arg_status_filter,
status_count_all=all_dags_count,
status_count_active=status_count_active,
status_count_paused=status_count_paused,
status_count_running=status_count_running,
status_count_failed=status_count_failed,
tags_filter=arg_tags_filter,
sorting_key=arg_sorting_key,
sorting_direction=arg_sorting_direction,
auto_refresh_interval=conf.getint("webserver", "auto_refresh_interval"),
dataset_triggered_next_run_info=dataset_triggered_next_run_info,
)
@expose("/datasets")
@auth.has_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET)])
def datasets(self):
"""Datasets view."""
state_color_mapping = State.state_color.copy()
state_color_mapping["null"] = state_color_mapping.pop(None)
return self.render_template(
"airflow/datasets.html",
state_color_mapping=state_color_mapping,
)
@expose("/cluster_activity")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
]
)
def cluster_activity(self):
"""Cluster Activity view."""
state_color_mapping = State.state_color.copy()
state_color_mapping["no_status"] = state_color_mapping.pop(None)
return self.render_template(
"airflow/cluster_activity.html",
auto_refresh_interval=conf.getint("webserver", "auto_refresh_interval"),
state_color_mapping=state_color_mapping,
)
@expose("/next_run_datasets_summary", methods=["POST"])
@auth.has_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@provide_session
def next_run_datasets_summary(self, session: Session = NEW_SESSION):
"""Next run info for dataset triggered DAGs."""
allowed_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
if not allowed_dag_ids:
return flask.json.jsonify({})
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist("dag_ids") if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
dataset_triggered_dag_ids = [
dag.dag_id
for dag in (
session.scalars(
select(DagModel.dag_id)
.where(DagModel.dag_id.in_(filter_dag_ids))
.where(DagModel.schedule_interval == "Dataset")
)
)
]
dataset_triggered_next_run_info = get_dataset_triggered_next_run_info(
dataset_triggered_dag_ids, session=session
)
return flask.json.jsonify(dataset_triggered_next_run_info)
@expose("/dag_stats", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_stats(self, session: Session = NEW_SESSION):
"""Dag statistics."""
allowed_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist("dag_ids") if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return flask.json.jsonify({})
dag_state_stats = session.execute(
select(DagRun.dag_id, DagRun.state, sqla.func.count(DagRun.state))
.group_by(DagRun.dag_id, DagRun.state)
.where(DagRun.dag_id.in_(filter_dag_ids))
)
dag_state_data = {(dag_id, state): count for dag_id, state, count in dag_state_stats}
payload = {
dag_id: [
{"state": state, "count": dag_state_data.get((dag_id, state), 0)}
for state in State.dag_states
]
for dag_id in filter_dag_ids
}
return flask.json.jsonify(payload)
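# Example response shape (illustrative): {"example_dag": [{"state": "queued", "count": 1},
# {"state": "running", "count": 0}, ...]} with one entry per state in State.dag_states.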
@expose("/task_stats", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def task_stats(self, session: Session = NEW_SESSION):
"""Task Statistics."""
allowed_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
if not allowed_dag_ids:
return flask.json.jsonify({})
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist("dag_ids") if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
running_dag_run_query_result = (
select(DagRun.dag_id, DagRun.run_id)
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.where(DagRun.state == DagRunState.RUNNING, DagModel.is_active)
)
running_dag_run_query_result = running_dag_run_query_result.where(DagRun.dag_id.in_(filter_dag_ids))
running_dag_run_query_result = running_dag_run_query_result.subquery("running_dag_run")
# Select all task_instances from active dag_runs.
running_task_instance_query_result = select(
TaskInstance.dag_id.label("dag_id"),
TaskInstance.state.label("state"),
sqla.literal(True).label("is_dag_running"),
).join(
running_dag_run_query_result,
and_(
running_dag_run_query_result.c.dag_id == TaskInstance.dag_id,
running_dag_run_query_result.c.run_id == TaskInstance.run_id,
),
)
if conf.getboolean("webserver", "SHOW_RECENT_STATS_FOR_COMPLETED_RUNS", fallback=True):
last_dag_run = (
select(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label("execution_date"))
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.where(DagRun.state != DagRunState.RUNNING, DagModel.is_active)
.group_by(DagRun.dag_id)
)
last_dag_run = last_dag_run.where(DagRun.dag_id.in_(filter_dag_ids))
last_dag_run = last_dag_run.subquery("last_dag_run")
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
last_task_instance_query_result = (
select(
TaskInstance.dag_id.label("dag_id"),
TaskInstance.state.label("state"),
sqla.literal(False).label("is_dag_running"),
)
.join(TaskInstance.dag_run)
.join(
last_dag_run,
and_(
last_dag_run.c.dag_id == TaskInstance.dag_id,
last_dag_run.c.execution_date == DagRun.execution_date,
),
)
)
final_task_instance_query_result = union_all(
last_task_instance_query_result, running_task_instance_query_result
).alias("final_ti")
else:
final_task_instance_query_result = running_task_instance_query_result.subquery("final_ti")
qry = session.execute(
select(
final_task_instance_query_result.c.dag_id,
final_task_instance_query_result.c.state,
final_task_instance_query_result.c.is_dag_running,
sqla.func.count(),
)
.group_by(
final_task_instance_query_result.c.dag_id,
final_task_instance_query_result.c.state,
final_task_instance_query_result.c.is_dag_running,
)
.order_by(
final_task_instance_query_result.c.dag_id,
final_task_instance_query_result.c.is_dag_running.desc(),
)
)
data = get_task_stats_from_query(qry)
payload: dict[str, list[dict[str, Any]]] = collections.defaultdict(list)
for dag_id in filter_dag_ids:
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({"state": state, "count": count})
return flask.json.jsonify(payload)
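# Example response shape (illustrative): {"example_dag": [{"state": "success", "count": 4},
# {"state": "failed", "count": 0}, ...]} with one entry per state in State.task_states.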
@expose("/last_dagruns", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def last_dagruns(self, session: Session = NEW_SESSION):
"""Last DAG runs."""
allowed_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist("dag_ids") if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return flask.json.jsonify({})
last_runs_subquery = (
select(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label("max_execution_date"),
)
.group_by(DagRun.dag_id)
.where(DagRun.dag_id.in_(filter_dag_ids)) # Only include accessible/selected DAGs.
.subquery("last_runs")
)
query = session.execute(
select(
DagRun.dag_id,
DagRun.start_date,
DagRun.end_date,
DagRun.state,
DagRun.execution_date,
DagRun.data_interval_start,
DagRun.data_interval_end,
).join(
last_runs_subquery,
and_(
last_runs_subquery.c.dag_id == DagRun.dag_id,
last_runs_subquery.c.max_execution_date == DagRun.execution_date,
),
)
)
resp = {
r.dag_id.replace(".", "__dot__"): {
"dag_id": r.dag_id,
"state": r.state,
"execution_date": wwwutils.datetime_to_string(r.execution_date),
"start_date": wwwutils.datetime_to_string(r.start_date),
"end_date": wwwutils.datetime_to_string(r.end_date),
"data_interval_start": wwwutils.datetime_to_string(r.data_interval_start),
"data_interval_end": wwwutils.datetime_to_string(r.data_interval_end),
}
for r in query
}
return flask.json.jsonify(resp)
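# Example response shape (illustrative); note dots in dag_ids are escaped in the keys:
#   {"my__dot__dag": {"dag_id": "my.dag", "state": "success", "execution_date": "...", ...}}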
@expose("/code")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
]
)
def legacy_code(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.code", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/code")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
]
)
@provide_session
def code(self, dag_id, session: Session = NEW_SESSION):
"""Dag Code."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag or not dag_model:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag_model.fileloc, session)
wwwutils.check_dag_warnings(dag_model.dag_id, session)
try:
code = DagCode.get_code_by_fileloc(dag_model.fileloc)
html_code = Markup(highlight(code, lexers.PythonLexer(), HtmlFormatter(linenos=True)))
except Exception as e:
error = f"Exception encountered during dag code retrieval/code highlighting:\n\n{e}\n"
html_code = Markup("<p>Failed to load DAG file Code.</p><p>Details: {}</p>").format(escape(error))
return self.render_template(
"airflow/dag_code.html",
html_code=html_code,
dag=dag,
dag_model=dag_model,
title=dag_id,
root=request.args.get("root"),
wrapped=conf.getboolean("webserver", "default_wrap"),
)
@expose("/dag_details")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
def legacy_dag_details(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.dag_details", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/details")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_details(self, dag_id, session: Session = NEW_SESSION):
"""Get Dag details."""
from airflow.models.dag import DagOwnerAttributes
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
title = "DAG Details"
root = request.args.get("root", "")
states = session.execute(
select(TaskInstance.state, sqla.func.count(TaskInstance.dag_id))
.where(TaskInstance.dag_id == dag_id)
.group_by(TaskInstance.state)
).all()
active_runs = models.DagRun.find(dag_id=dag_id, state=DagRunState.RUNNING, external_trigger=False)
tags = session.scalars(select(models.DagTag).where(models.DagTag.dag_id == dag_id)).all()
# TODO: convert this to a relationship
owner_links = session.execute(select(DagOwnerAttributes).filter_by(dag_id=dag_id)).all()
attrs_to_avoid = [
"schedule_datasets",
"schedule_dataset_references",
"task_outlet_dataset_references",
"NUM_DAGS_PER_DAGRUN_QUERY",
"serialized_dag",
"tags",
"default_view",
"relative_fileloc",
"dag_id",
"description",
"max_active_runs",
"max_active_tasks",
"schedule_interval",
"owners",
"dag_owner_links",
"is_paused",
]
attrs_to_avoid.extend(wwwutils.get_attr_renderer().keys())
dag_model_attrs: list[tuple[str, Any]] = [
(attr_name, attr)
for attr_name, attr in (
(attr_name, getattr(dag_model, attr_name))
for attr_name in dir(dag_model)
if not attr_name.startswith("_") and attr_name not in attrs_to_avoid
)
if not callable(attr)
]
return self.render_template(
"airflow/dag_details.html",
dag=dag,
dag_model=dag_model,
title=title,
root=root,
states=states,
State=State,
active_runs=active_runs,
tags=tags,
owner_links=owner_links,
dag_model_attrs=dag_model_attrs,
)
@expose("/rendered-templates")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def rendered_templates(self, session):
"""Get rendered Dag."""
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
map_index = request.args.get("map_index", -1, type=int)
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
form = DateTimeForm(data={"execution_date": dttm})
root = request.args.get("root", "")
logging.info("Retrieving rendered templates.")
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date=dttm, session=session)
raw_task = dag.get_task(task_id).prepare_for_execution()
title = "Rendered Template"
html_dict = {}
ti: TaskInstance
if dag_run is None:
# No DAG run matching given logical date. This usually means this
# DAG has never been run. Task instance rendering does not really
# make sense in this situation, but "works" prior to AIP-39. This
# "fakes" a temporary DagRun-TaskInstance association (not saved to
# database) for presentation only.
ti = TaskInstance(raw_task, map_index=map_index)
ti.dag_run = DagRun(dag_id=dag_id, execution_date=dttm)
else:
ti = dag_run.get_task_instance(task_id=task_id, map_index=map_index, session=session)
if ti:
ti.refresh_from_task(raw_task)
else:
flash(f"there is no task instance with the provided map_index {map_index}", "error")
return self.render_template(
"airflow/ti_code.html",
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
form=form,
root=root,
title=title,
)
try:
ti.get_rendered_template_fields(session=session)
except AirflowException as e:
if not e.__cause__:
flash(f"Error rendering template: {e}", "error")
else:
msg = Markup("Error rendering template: {0}<br><br>OriginalError: {0.__cause__}").format(e)
flash(msg, "error")
except Exception as e:
flash(f"Error rendering template: {e}", "error")
# Ensure we are rendering the unmapped operator. Unmapping should be
# done automatically if template fields are rendered successfully; this
# only matters if get_rendered_template_fields() raised an exception.
# The following rendering won't show useful values in this case anyway,
# but we'll display some quasi-meaningful field names.
task = ti.task.unmap(None)
renderers = wwwutils.get_attr_renderer()
for template_field in task.template_fields:
content = getattr(task, template_field)
renderer = task.template_fields_renderers.get(template_field, template_field)
if renderer in renderers:
html_dict[template_field] = renderers[renderer](content)
else:
html_dict[template_field] = Markup("<pre><code>{}</code></pre>").format(pformat(content))
if isinstance(content, dict):
if template_field == "op_kwargs":
for key, value in content.items():
renderer = task.template_fields_renderers.get(key, key)
if renderer in renderers:
html_dict[".".join([template_field, key])] = renderers[renderer](value)
else:
html_dict[".".join([template_field, key])] = Markup(
"<pre><code>{}</pre></code>"
).format(pformat(value))
else:
for dict_keys in get_key_paths(content):
template_path = ".".join((template_field, dict_keys))
renderer = task.template_fields_renderers.get(template_path, template_path)
if renderer in renderers:
content_value = get_value_from_path(dict_keys, content)
html_dict[template_path] = renderers[renderer](content_value)
return self.render_template(
"airflow/ti_code.html",
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
form=form,
root=root,
title=title,
)
@expose("/rendered-k8s")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def rendered_k8s(self, *, session: Session = NEW_SESSION):
"""Get rendered k8s yaml."""
if not settings.IS_K8S_OR_K8SCELERY_EXECUTOR:
abort(404)
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
if task_id is None:
logging.warning("Task id not passed in the request")
abort(400)
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
form = DateTimeForm(data={"execution_date": dttm})
root = request.args.get("root", "")
map_index = request.args.get("map_index", -1, type=int)
logging.info("Retrieving rendered templates.")
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
dag_run = dag.get_dagrun(execution_date=dttm, session=session)
ti = dag_run.get_task_instance(task_id=task.task_id, map_index=map_index, session=session)
pod_spec = None
try:
pod_spec = ti.get_rendered_k8s_spec(session=session)
except AirflowException as e:
if not e.__cause__:
flash(f"Error rendering Kubernetes POD Spec: {e}", "error")
else:
tmp = Markup("Error rendering Kubernetes POD Spec: {0}<br><br>Original error: {0.__cause__}")
flash(tmp.format(e), "error")
except Exception as e:
flash(f"Error rendering Kubernetes Pod Spec: {e}", "error")
title = "Rendered K8s Pod Spec"
if pod_spec:
content = wwwutils.get_attr_renderer()["yaml"](yaml.dump(pod_spec))
else:
content = Markup("<pre><code>Error rendering Kubernetes POD Spec</pre></code>")
return self.render_template(
"airflow/ti_code.html",
html_dict={"k8s": content},
dag=dag,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
form=form,
root=root,
title=title,
)
@expose("/get_logs_with_metadata")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def get_logs_with_metadata(self, session: Session = NEW_SESSION):
"""Retrieve logs including metadata."""
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
execution_date_str = request.args["execution_date"]
map_index = request.args.get("map_index", -1, type=int)
try_number = request.args.get("try_number", type=int)
metadata_str = request.args.get("metadata", "{}")
response_format = request.args.get("format", "json")
# Validate JSON metadata
try:
metadata: dict = json.loads(metadata_str) or {}
except json.decoder.JSONDecodeError:
return {"error": "Invalid JSON metadata"}, 400
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date_str)
except ValueError:
error_message = (
f"Given execution date, {execution_date}, could not be identified as a date. "
"Example date format: 2015-11-16T14:34:15+00:00"
)
return {"error": error_message}, 400
task_log_reader = TaskLogReader()
if not task_log_reader.supports_read:
return {
"message": "Task log handler does not support read logs.",
"error": True,
"metadata": {"end_of_log": True},
}
ti = session.scalar(
select(models.TaskInstance)
.where(
TaskInstance.task_id == task_id,
TaskInstance.dag_id == dag_id,
TaskInstance.execution_date == execution_date,
TaskInstance.map_index == map_index,
)
.join(TaskInstance.dag_run)
.options(joinedload(TaskInstance.trigger).joinedload(Trigger.triggerer_job))
.limit(1)
)
if ti is None:
return {
"message": "*** Task instance did not exist in the DB\n",
"error": True,
"metadata": {"end_of_log": True},
}
try:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if dag:
ti.task = dag.get_task(ti.task_id)
if response_format == "json":
logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata)
message = logs[0] if try_number is not None else logs
return {"message": message, "metadata": metadata}
metadata["download_logs"] = True
attachment_filename = task_log_reader.render_log_filename(ti, try_number, session=session)
log_stream = task_log_reader.read_log_stream(ti, try_number, metadata)
return Response(
response=log_stream,
mimetype="text/plain",
headers={"Content-Disposition": f"attachment; filename={attachment_filename}"},
)
except AttributeError as e:
error_messages = [f"Task log handler does not support read logs.\n{str(e)}\n"]
metadata["end_of_log"] = True
return {"message": error_messages, "error": True, "metadata": metadata}
@expose("/log")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def log(self, session: Session = NEW_SESSION):
"""Retrieve log."""
dag_id = request.args["dag_id"]
task_id = request.args.get("task_id")
map_index = request.args.get("map_index", -1, type=int)
execution_date = request.args.get("execution_date")
if execution_date:
dttm = _safe_parse_datetime(execution_date)
else:
dttm = None
form = DateTimeForm(data={"execution_date": dttm})
dag_model = DagModel.get_dagmodel(dag_id)
ti = session.scalar(
select(models.TaskInstance)
.filter_by(dag_id=dag_id, task_id=task_id, execution_date=dttm, map_index=map_index)
.limit(1)
)
num_logs = 0
if ti is not None:
num_logs = wwwutils.get_try_count(ti._try_number, ti.state)
logs = [""] * num_logs
root = request.args.get("root", "")
return self.render_template(
"airflow/ti_log.html",
logs=logs,
dag=dag_model,
title="Log by attempts",
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
form=form,
root=root,
wrapped=conf.getboolean("webserver", "default_wrap"),
)
@expose("/redirect_to_external_log")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def redirect_to_external_log(self, session: Session = NEW_SESSION):
"""Redirects to external log."""
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
map_index = request.args.get("map_index", -1, type=int)
try_number = request.args.get("try_number", 1)
ti = session.scalar(
select(models.TaskInstance)
.filter_by(dag_id=dag_id, task_id=task_id, execution_date=dttm, map_index=map_index)
.limit(1)
)
if not ti:
flash(f"Task [{dag_id}.{task_id}] does not exist", "error")
return redirect(url_for("Airflow.index"))
task_log_reader = TaskLogReader()
if not task_log_reader.supports_external_link:
flash("Task log handler does not support external links", "error")
return redirect(url_for("Airflow.index"))
handler = task_log_reader.log_handler
url = handler.get_external_log_url(ti, try_number)
return redirect(url)
@expose("/task")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def task(self, session: Session = NEW_SESSION):
"""Retrieve task."""
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
map_index = request.args.get("map_index", -1, type=int)
form = DateTimeForm(data={"execution_date": dttm})
root = request.args.get("root", "")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for("Airflow.index"))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti: TaskInstance | None = session.scalar(
select(TaskInstance)
.options(
# HACK: Eager-load relationships. This is needed because
# multiple properties mis-use provide_session(), which destroys
# the session object ti is bound to.
joinedload(TaskInstance.queued_by_job, innerjoin=False),
joinedload(TaskInstance.trigger, innerjoin=False),
)
.filter_by(execution_date=dttm, dag_id=dag_id, task_id=task_id, map_index=map_index)
)
if ti is None:
ti_attrs: list[tuple[str, Any]] | None = None
else:
ti.refresh_from_task(task)
ti_attrs_to_skip = [
"dag_id",
"key",
"mark_success_url",
"log",
"log_url",
"task",
"trigger",
"triggerer_job",
]
# Some fields on TI are deprecated, but we don't want those warnings here.
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
all_ti_attrs = (
# fetching the value of _try_number to be shown under name try_number in UI
(name, getattr(ti, "_try_number" if name == "try_number" else name))
for name in dir(ti)
if not name.startswith("_") and name not in ti_attrs_to_skip
)
ti_attrs = sorted((name, attr) for name, attr in all_ti_attrs if not callable(attr))
attr_renderers = wwwutils.get_attr_renderer()
attrs_to_skip: frozenset[str] = getattr(task, "HIDE_ATTRS_FROM_UI", frozenset())
def include_task_attrs(attr_name):
return not (
attr_name == "HIDE_ATTRS_FROM_UI"
or attr_name.startswith("_")
or attr_name in attr_renderers
or attr_name in attrs_to_skip
)
task_attrs = [
(attr_name, secrets_masker.redact(attr, attr_name))
for attr_name, attr in (
(attr_name, getattr(task, attr_name)) for attr_name in filter(include_task_attrs, dir(task))
)
if not callable(attr)
]
# Color coding the special attributes that are code
special_attrs_rendered = {
attr_name: renderer(getattr(task, attr_name))
for attr_name, renderer in attr_renderers.items()
if hasattr(task, attr_name)
}
no_failed_deps_result = [
(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br>\n- The scheduler is down or under heavy load<br>\n{}\n"
"<br>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br>"
if ti and ti.state is None
else ""
),
)
]
# Use the scheduler's context to figure out which dependencies are not met
if ti is None:
failed_dep_reasons: list[tuple[str, str]] = []
else:
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [
(dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses(dep_context=dep_context)
]
title = "Task Instance Details"
return self.render_template(
"airflow/task.html",
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag,
title=title,
)
@expose("/xcom")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
]
)
@action_logging
@provide_session
def xcom(self, session: Session = NEW_SESSION):
"""Retrieve XCOM."""
dag_id = request.args["dag_id"]
task_id = request.args.get("task_id")
map_index = request.args.get("map_index", -1, type=int)
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
form = DateTimeForm(data={"execution_date": dttm})
root = request.args.get("root", "")
dag = DagModel.get_dagmodel(dag_id)
ti = session.scalar(select(TaskInstance).filter_by(dag_id=dag_id, task_id=task_id).limit(1))
if not ti:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for("Airflow.index"))
xcom_query = session.execute(
select(XCom.key, XCom.value).where(
XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == dttm,
XCom.map_index == map_index,
)
)
attributes = [(k, v) for k, v in xcom_query if not k.startswith("_")]
title = "XCom"
return self.render_template(
"airflow/xcom.html",
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
map_index=map_index,
form=form,
root=root,
dag=dag,
title=title,
)
@expose("/delete", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
]
)
@action_logging
def delete(self):
"""Deletes DAG."""
from airflow.api.common import delete_dag
from airflow.exceptions import DagNotFound
dag_id = request.values.get("dag_id")
origin = get_safe_url(request.values.get("origin"))
redirect_url = get_safe_url(request.values.get("redirect_url"))
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash(f"DAG with id {dag_id} not found. Cannot delete", "error")
return redirect(redirect_url)
except AirflowException:
flash(
f"Cannot delete DAG with id {dag_id} because some task instances of the DAG "
"are still running. Please mark the task instances as "
"failed/succeeded before deleting the DAG",
"error",
)
return redirect(redirect_url)
flash(f"Deleting DAG with id {dag_id}. May take a couple minutes to fully disappear.")
# Upon success return to origin.
return redirect(origin)
@expose("/dags/<string:dag_id>/trigger", methods=["POST", "GET"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
@provide_session
def trigger(self, dag_id: str, session: Session = NEW_SESSION):
"""Triggers DAG Run."""
run_id = request.values.get("run_id", "").replace(" ", "+")
origin = get_safe_url(request.values.get("origin"))
unpause = request.values.get("unpause")
request_conf = request.values.get("conf")
request_execution_date = request.values.get("execution_date", default=timezone.utcnow().isoformat())
is_dag_run_conf_overrides_params = conf.getboolean("core", "dag_run_conf_overrides_params")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dag_orm: DagModel = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id).limit(1))
# Prepare form fields with param struct details to render a proper form with schema information
form_fields = {}
for k, v in dag.params.items():
form_fields[k] = v.dump()
# If no schema is provided, auto-detect on default values
if "schema" not in form_fields[k]:
form_fields[k]["schema"] = {}
if "type" not in form_fields[k]["schema"]:
if isinstance(form_fields[k]["value"], bool):
form_fields[k]["schema"]["type"] = "boolean"
elif isinstance(form_fields[k]["value"], int):
form_fields[k]["schema"]["type"] = ["integer", "null"]
elif isinstance(form_fields[k]["value"], list):
form_fields[k]["schema"]["type"] = ["array", "null"]
elif isinstance(form_fields[k]["value"], dict):
form_fields[k]["schema"]["type"] = ["object", "null"]
# Mark markup fields as safe
if (
"description_html" in form_fields[k]["schema"]
and form_fields[k]["schema"]["description_html"]
):
form_fields[k]["description"] = Markup(form_fields[k]["schema"]["description_html"])
if "custom_html_form" in form_fields[k]["schema"]:
form_fields[k]["schema"]["custom_html_form"] = Markup(
form_fields[k]["schema"]["custom_html_form"]
)
ui_fields_defined = any("const" not in f["schema"] for f in form_fields.values())
if not dag_orm:
flash(f"Cannot find dag {dag_id}")
return redirect(origin)
if dag_orm.has_import_errors:
flash(f"Cannot create dagruns because the dag {dag_id} has import errors", "error")
return redirect(origin)
recent_runs = session.execute(
select(DagRun.conf, func.max(DagRun.run_id).label("run_id"), func.max(DagRun.execution_date))
.where(
DagRun.dag_id == dag_id,
DagRun.run_type == DagRunType.MANUAL,
DagRun.conf.isnot(None),
)
.group_by(DagRun.conf)
.order_by(func.max(DagRun.execution_date).desc())
.limit(5)
)
recent_confs = {
run_id: json.dumps(run_conf)
for run_id, run_conf in ((run.run_id, run.conf) for run in recent_runs)
if isinstance(run_conf, dict) and any(run_conf)
}
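# recent_confs maps run_id -> JSON-encoded conf, e.g. (hypothetical):
#   {"manual__2024-01-01T00:00:00+00:00": '{"param": "value"}'}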
if request.method == "GET" and ui_fields_defined:
# Populate conf textarea with conf requests parameter, or dag.params
default_conf = ""
doc_md = wwwutils.wrapped_markdown(getattr(dag, "doc_md", None))
form = DateTimeForm(data={"execution_date": request_execution_date})
if request_conf:
default_conf = request_conf
else:
try:
default_conf = json.dumps(
{str(k): v.resolve(suppress_exception=True) for k, v in dag.params.items()},
indent=4,
ensure_ascii=False,
)
except TypeError:
flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=default_conf,
doc_md=doc_md,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
try:
execution_date = timezone.parse(request_execution_date)
except ParserError:
flash("Invalid execution date", "error")
form = DateTimeForm(data={"execution_date": timezone.utcnow().isoformat()})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf if request_conf else {},
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
dr = DagRun.find_duplicate(dag_id=dag_id, run_id=run_id, execution_date=execution_date)
if dr:
if dr.run_id == run_id:
message = f"The run ID {run_id} already exists"
else:
message = f"The logical date {execution_date} already exists"
flash(message, "error")
return redirect(origin)
regex = conf.get("scheduler", "allowed_run_id_pattern")
if run_id and not re2.match(RUN_ID_REGEX, run_id):
if not regex.strip() or not re2.match(regex.strip(), run_id):
flash(
f"The provided run ID '{run_id}' is invalid. It does not match either "
f"the configured pattern: '{regex}' or the built-in pattern: '{RUN_ID_REGEX}'",
"error",
)
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
run_conf = {}
if request_conf:
try:
run_conf = json.loads(request_conf)
if not isinstance(run_conf, dict):
flash("Invalid JSON configuration, must be a dict", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration, not parseable", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
if dag.get_is_paused():
if unpause or not ui_fields_defined:
flash(f"Unpaused DAG {dag_id}.")
dag_model = models.DagModel.get_dagmodel(dag_id)
if dag_model is not None:
dag_model.set_is_paused(is_paused=False)
else:
flash(
f"DAG {dag_id} is paused, unpause if you want to have the triggered run being executed.",
"warning",
)
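        # All validation passed: create the manual run in the QUEUED state.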
try:
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=execution_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=execution_date),
state=DagRunState.QUEUED,
conf=run_conf,
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
run_id=run_id,
)
except (ValueError, ParamValidationError) as ve:
flash(f"{ve}", "error")
form = DateTimeForm(data={"execution_date": execution_date})
# Take over "bad" submitted fields for new form display
for k, v in form_fields.items():
form_fields[k]["value"] = run_conf[k]
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
)
flash(f"Triggered {dag_id}, it should start any moment now.")
return redirect(origin)
def _clear_dag_tis(
self,
dag: DAG,
start_date: datetime.datetime | None,
end_date: datetime.datetime | None,
*,
origin: str | None,
task_ids: Collection[str | tuple[str, int]] | None = None,
recursive: bool = False,
confirmed: bool = False,
only_failed: bool = False,
session: Session,
):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=task_ids,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
session=session,
)
msg = f"{count} task instances have been cleared"
return redirect_or_json(origin, msg)
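        # Not yet confirmed: do a dry run so the user can review the affected task instances first.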
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=task_ids,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
session=session,
)
except AirflowException as ex:
return redirect_or_json(origin, msg=str(ex), status="error", status_code=500)
assert isinstance(tis, collections.abc.Iterable)
details = [str(t) for t in tis]
if not details:
return redirect_or_json(origin, "No task instances to clear", status="error", status_code=404)
elif request.headers.get("Accept") == "application/json":
if confirmed:
return htmlsafe_json_dumps(details, separators=(",", ":"))
return htmlsafe_json_dumps(
[{"task_id": ti.task_id, "map_index": ti.map_index, "run_id": ti.run_id} for ti in tis],
separators=(",", ":"),
)
return self.render_template(
"airflow/confirm.html",
endpoint=None,
message="Task instances you are about to clear:",
details="\n".join(details),
)
@expose("/clear", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def clear(self, *, session: Session = NEW_SESSION):
"""Clears DAG tasks."""
dag_id = request.form.get("dag_id")
task_id = request.form.get("task_id")
origin = get_safe_url(request.form.get("origin"))
dag = get_airflow_app().dag_bag.get_dag(dag_id)
group_id = request.form.get("group_id")
if "map_index" not in request.form:
map_indexes: list[int] | None = None
else:
map_indexes = request.form.getlist("map_index", type=int)
execution_date_str = request.form.get("execution_date")
execution_date = _safe_parse_datetime(execution_date_str)
confirmed = request.form.get("confirmed") == "true"
upstream = request.form.get("upstream") == "true"
downstream = request.form.get("downstream") == "true"
future = request.form.get("future") == "true"
past = request.form.get("past") == "true"
recursive = request.form.get("recursive") == "true"
only_failed = request.form.get("only_failed") == "true"
task_ids: list[str | tuple[str, int]] = []
end_date = execution_date if not future else None
start_date = execution_date if not past else None
locked_dag_run_ids: list[int] = []
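        # Clearing targets either a whole task group or a single task (optionally specific map indexes).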
if group_id is not None:
task_group_dict = dag.task_group.get_task_group_dict()
task_group = task_group_dict.get(group_id)
if task_group is None:
return redirect_or_json(
origin, msg=f"TaskGroup {group_id} could not be found", status="error", status_code=404
)
task_ids = task_ids_or_regex = [t.task_id for t in task_group.iter_tasks()]
            # Lock the related dag runs to prevent possible deadlocks.
            # https://github.com/apache/airflow/pull/26658
            dag_runs_query = select(DagRun.id).where(DagRun.dag_id == dag_id).with_for_update()
            if start_date is None and end_date is None:
                dag_runs_query = dag_runs_query.where(DagRun.execution_date == start_date)
            else:
                if start_date is not None:
                    dag_runs_query = dag_runs_query.where(DagRun.execution_date >= start_date)
                if end_date is not None:
                    dag_runs_query = dag_runs_query.where(DagRun.execution_date <= end_date)
            locked_dag_run_ids = session.scalars(dag_runs_query).all()
elif task_id:
if map_indexes is None:
task_ids = [task_id]
else:
task_ids = [(task_id, map_index) for map_index in map_indexes]
task_ids_or_regex = [task_id]
dag = dag.partial_subset(
task_ids_or_regex=task_ids_or_regex,
include_downstream=downstream,
include_upstream=upstream,
)
if len(dag.task_dict) > 1:
# If we had upstream/downstream etc then also include those!
task_ids.extend(tid for tid in dag.task_dict if tid != task_id)
response = self._clear_dag_tis(
dag,
start_date,
end_date,
origin=origin,
task_ids=task_ids,
recursive=recursive,
confirmed=confirmed,
only_failed=only_failed,
session=session,
)
del locked_dag_run_ids
return response
@expose("/dagrun_clear", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def dagrun_clear(self, *, session: Session = NEW_SESSION):
"""Clears the DagRun."""
dag_id = request.form.get("dag_id")
dag_run_id = request.form.get("dag_run_id")
confirmed = request.form.get("confirmed") == "true"
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dr = dag.get_dagrun(run_id=dag_run_id)
start_date = dr.logical_date
end_date = dr.logical_date
return self._clear_dag_tis(
dag,
start_date,
end_date,
origin=None,
recursive=True,
confirmed=confirmed,
session=session,
)
@expose("/blocked", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def blocked(self, session: Session = NEW_SESSION):
"""Mark Dag Blocked."""
allowed_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist("dag_ids") if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return flask.json.jsonify([])
dags = session.execute(
select(DagRun.dag_id, sqla.func.count(DagRun.id))
.where(DagRun.state == DagRunState.RUNNING)
.where(DagRun.dag_id.in_(filter_dag_ids))
.group_by(DagRun.dag_id)
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append(
{
"dag_id": dag_id,
"active_dag_run": active_dag_runs,
"max_active_runs": max_active_runs,
}
)
return flask.json.jsonify(payload)
def _mark_dagrun_state_as_failed(self, dag_id, dag_run_id, confirmed):
if not dag_run_id:
return {"status": "error", "message": "Invalid dag_run_id"}
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
return {"status": "error", "message": f"Cannot find DAG: {dag_id}"}
new_dag_state = set_dag_run_state_to_failed(dag=dag, run_id=dag_run_id, commit=confirmed)
if confirmed:
return {"status": "success", "message": f"Marked failed on {len(new_dag_state)} task instances"}
else:
details = [str(t) for t in new_dag_state]
return htmlsafe_json_dumps(details, separators=(",", ":"))
def _mark_dagrun_state_as_success(self, dag_id, dag_run_id, confirmed):
if not dag_run_id:
return {"status": "error", "message": "Invalid dag_run_id"}
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
return {"status": "error", "message": f"Cannot find DAG: {dag_id}"}
new_dag_state = set_dag_run_state_to_success(dag=dag, run_id=dag_run_id, commit=confirmed)
if confirmed:
return {"status": "success", "message": f"Marked success on {len(new_dag_state)} task instances"}
else:
details = [str(t) for t in new_dag_state]
return htmlsafe_json_dumps(details, separators=(",", ":"))
@provide_session
def _mark_dagrun_state_as_queued(
self,
dag_id: str,
dag_run_id: str,
confirmed: bool,
session: Session = NEW_SESSION,
):
if not dag_run_id:
return {"status": "error", "message": "Invalid dag_run_id"}
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
return {"status": "error", "message": f"Cannot find DAG: {dag_id}"}
set_dag_run_state_to_queued(dag=dag, run_id=dag_run_id, commit=confirmed)
if confirmed:
return {"status": "success", "message": "Marked the DagRun as queued."}
else:
# Identify tasks that will be queued up to run when confirmed
all_task_ids = [task.task_id for task in dag.tasks]
existing_tis = session.execute(
select(TaskInstance.task_id).where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id == dag_run_id,
)
)
completed_tis_ids = [task_id for task_id, in existing_tis]
tasks_with_no_state = list(set(all_task_ids) - set(completed_tis_ids))
details = [str(t) for t in tasks_with_no_state]
return htmlsafe_json_dumps(details, separators=(",", ":"))
@expose("/dagrun_failed", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_failed(self):
"""Mark DagRun failed."""
dag_id = request.form.get("dag_id")
dag_run_id = request.form.get("dag_run_id")
confirmed = request.form.get("confirmed") == "true"
return self._mark_dagrun_state_as_failed(dag_id, dag_run_id, confirmed)
@expose("/dagrun_success", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_success(self):
"""Mark DagRun success."""
dag_id = request.form.get("dag_id")
dag_run_id = request.form.get("dag_run_id")
confirmed = request.form.get("confirmed") == "true"
return self._mark_dagrun_state_as_success(dag_id, dag_run_id, confirmed)
@expose("/dagrun_queued", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_queued(self):
"""Queue DagRun so tasks that haven't run yet can be started."""
dag_id = request.form.get("dag_id")
dag_run_id = request.form.get("dag_run_id")
confirmed = request.form.get("confirmed") == "true"
return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed)
@expose("/dagrun_details")
def dagrun_details(self):
"""Redirect to the GRID DAGRun page. This is avoids breaking links."""
dag_id = request.args.get("dag_id")
run_id = request.args.get("run_id")
return redirect(url_for("Airflow.grid", dag_id=dag_id, dag_run_id=run_id))
def _mark_task_instance_state(
self,
*,
dag_id: str,
run_id: str,
task_id: str,
map_indexes: list[int] | None,
origin: str,
upstream: bool,
downstream: bool,
future: bool,
past: bool,
state: TaskInstanceState,
):
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not run_id:
flash(f"Cannot mark tasks as {state}, seem that DAG {dag_id} has never run", "error")
return redirect(origin)
altered = dag.set_task_instance_state(
task_id=task_id,
map_indexes=map_indexes,
run_id=run_id,
state=state,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
)
flash(f"Marked {state} on {len(altered)} task instances")
return redirect(origin)
def _mark_task_group_state(
self,
*,
dag_id: str,
run_id: str,
group_id: str,
origin: str,
upstream: bool,
downstream: bool,
future: bool,
past: bool,
state: TaskInstanceState,
):
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not run_id:
flash(f"Cannot mark tasks as {state}, as DAG {dag_id} has never run", "error")
return redirect(origin)
altered = dag.set_task_group_state(
group_id=group_id,
run_id=run_id,
state=state,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
)
flash(f"Marked {state} on {len(altered)} task instances")
return redirect(origin)
@expose("/confirm", methods=["GET"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def confirm(self):
"""Show confirmation page for marking tasks as success or failed."""
args = request.args
dag_id = args.get("dag_id")
task_id = args.get("task_id")
dag_run_id = args.get("dag_run_id")
state = args.get("state")
origin = get_safe_url(args.get("origin"))
group_id = args.get("group_id")
if "map_index" not in args:
map_indexes: list[int] | None = None
else:
map_indexes = args.getlist("map_index", type=int)
upstream = to_boolean(args.get("upstream"))
downstream = to_boolean(args.get("downstream"))
future = to_boolean(args.get("future"))
past = to_boolean(args.get("past"))
origin = origin or url_for("Airflow.index")
if not exactly_one(task_id, group_id):
raise ValueError("Exactly one of task_id or group_id must be provided")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
msg = f"DAG {dag_id} not found"
return redirect_or_json(origin, msg, status="error", status_code=404)
if state not in (
"success",
"failed",
):
msg = f"Invalid state {state}, must be either 'success' or 'failed'"
return redirect_or_json(origin, msg, status="error", status_code=400)
latest_execution_date = dag.get_latest_execution_date()
if not latest_execution_date:
msg = f"Cannot mark tasks as {state}, seem that dag {dag_id} has never run"
return redirect_or_json(origin, msg, status="error", status_code=400)
tasks: list[Operator | tuple[Operator, int]] = []
if group_id:
task_group_dict = dag.task_group.get_task_group_dict()
task_group = task_group_dict.get(group_id)
if task_group is None:
return redirect_or_json(
origin, msg=f"TaskGroup {group_id} could not be found", status="error", status_code=404
)
            tasks = list(task_group.iter_tasks())
elif task_id:
try:
task = dag.get_task(task_id)
except airflow.exceptions.TaskNotFound:
msg = f"Task {task_id} not found"
return redirect_or_json(origin, msg, status="error", status_code=404)
task.dag = dag
if map_indexes is None:
tasks = [task]
else:
tasks = [(task, map_index) for map_index in map_indexes]
to_be_altered = set_state(
tasks=tasks,
run_id=dag_run_id,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=False,
)
if request.headers.get("Accept") == "application/json":
return htmlsafe_json_dumps(
[
{"task_id": ti.task_id, "map_index": ti.map_index, "run_id": ti.run_id}
for ti in to_be_altered
],
separators=(",", ":"),
)
details = "\n".join(str(t) for t in to_be_altered)
response = self.render_template(
"airflow/confirm.html",
endpoint=url_for(f"Airflow.{state}"),
message=f"Task instances you are about to mark as {state}:",
details=details,
)
return response
@expose("/failed", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def failed(self):
"""Mark task or task_group as failed."""
args = request.form
dag_id = args.get("dag_id")
task_id = args.get("task_id")
run_id = args.get("dag_run_id")
group_id = args.get("group_id")
if not exactly_one(task_id, group_id):
raise ValueError("Exactly one of task_id or group_id must be provided")
if "map_index" not in args:
map_indexes: list[int] | None = None
else:
map_indexes = args.getlist("map_index", type=int)
origin = get_safe_url(args.get("origin"))
upstream = to_boolean(args.get("upstream"))
downstream = to_boolean(args.get("downstream"))
future = to_boolean(args.get("future"))
past = to_boolean(args.get("past"))
if task_id:
return self._mark_task_instance_state(
dag_id=dag_id,
run_id=run_id,
task_id=task_id,
map_indexes=map_indexes,
origin=origin,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=TaskInstanceState.FAILED,
)
elif group_id:
return self._mark_task_group_state(
dag_id=dag_id,
run_id=run_id,
group_id=group_id,
origin=origin,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=TaskInstanceState.FAILED,
)
@expose("/success", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def success(self):
"""Mark task or task_group as success."""
args = request.form
dag_id = args.get("dag_id")
task_id = args.get("task_id")
run_id = args.get("dag_run_id")
group_id = args.get("group_id")
if not exactly_one(task_id, group_id):
raise ValueError("Exactly one of task_id or group_id must be provided")
if "map_index" not in args:
map_indexes: list[int] | None = None
else:
map_indexes = args.getlist("map_index", type=int)
origin = get_safe_url(args.get("origin"))
upstream = to_boolean(args.get("upstream"))
downstream = to_boolean(args.get("downstream"))
future = to_boolean(args.get("future"))
past = to_boolean(args.get("past"))
if task_id:
return self._mark_task_instance_state(
dag_id=dag_id,
run_id=run_id,
task_id=task_id,
map_indexes=map_indexes,
origin=origin,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=TaskInstanceState.SUCCESS,
)
elif group_id:
return self._mark_task_group_state(
dag_id=dag_id,
run_id=run_id,
group_id=group_id,
origin=origin,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=TaskInstanceState.SUCCESS,
)
@expose("/dags/<string:dag_id>")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
def dag(self, dag_id):
"""Redirect to default DAG view."""
kwargs = {**sanitize_args(request.args), "dag_id": dag_id}
return redirect(url_for("Airflow.grid", **kwargs))
@expose("/tree")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
def legacy_tree(self):
"""Redirect to the replacement - grid view. Kept for backwards compatibility."""
return redirect(url_for("Airflow.grid", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/grid")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
@provide_session
def grid(self, dag_id: str, session: Session = NEW_SESSION):
"""Get Dag's grid view."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
num_runs = request.args.get("num_runs", type=int)
if num_runs is None:
num_runs = conf.getint("webserver", "default_dag_run_display_number")
doc_md = wwwutils.wrapped_markdown(getattr(dag, "doc_md", None))
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
default_dag_run_display_number = conf.getint("webserver", "default_dag_run_display_number")
num_runs_options = [5, 25, 50, 100, 365]
if default_dag_run_display_number not in num_runs_options:
insort_left(num_runs_options, default_dag_run_display_number)
return self.render_template(
"airflow/grid.html",
root=root,
dag=dag,
doc_md=doc_md,
num_runs=num_runs,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
dag_model=dag_model,
auto_refresh_interval=conf.getint("webserver", "auto_refresh_interval"),
default_dag_run_display_number=default_dag_run_display_number,
default_wrap=conf.getboolean("webserver", "default_wrap"),
filters_drop_down_values=htmlsafe_json_dumps(
{
"taskStates": [state.value for state in TaskInstanceState],
"dagStates": [state.value for state in State.dag_states],
"runTypes": [run_type.value for run_type in DagRunType],
"numRuns": num_runs_options,
}
),
)
@expose("/calendar")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@gzipped
@action_logging
def legacy_calendar(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.calendar", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/calendar")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@gzipped
@action_logging
@provide_session
def calendar(self, dag_id: str, session: Session = NEW_SESSION):
"""Get DAG runs as calendar."""
def _convert_to_date(session, column):
"""Convert column to date."""
if session.bind.dialect.name == "mssql":
return column.cast(Date)
else:
return func.date(column)
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
dag_states = session.execute(
select(
_convert_to_date(session, DagRun.execution_date).label("date"),
DagRun.state,
func.max(DagRun.data_interval_start).label("data_interval_start"),
func.max(DagRun.data_interval_end).label("data_interval_end"),
func.count("*").label("count"),
)
.where(DagRun.dag_id == dag.dag_id)
.group_by(_convert_to_date(session, DagRun.execution_date), DagRun.state)
.order_by(_convert_to_date(session, DagRun.execution_date).asc())
).all()
data_dag_states = [
{
# DATE() in SQLite and MySQL behave differently:
# SQLite returns a string, MySQL returns a date.
"date": dr.date if isinstance(dr.date, str) else dr.date.isoformat(),
"state": dr.state,
"count": dr.count,
}
for dr in dag_states
]
if dag_states and dag_states[-1].data_interval_start and dag_states[-1].data_interval_end:
last_automated_data_interval = DataInterval(
timezone.coerce_datetime(dag_states[-1].data_interval_start),
timezone.coerce_datetime(dag_states[-1].data_interval_end),
)
year = last_automated_data_interval.end.year
restriction = TimeRestriction(dag.start_date, dag.end_date, False)
dates: dict[datetime.date, int] = collections.Counter()
if isinstance(dag.timetable, CronMixin):
# Optimized calendar generation for timetables based on a cron expression.
dates_iter: Iterator[datetime.datetime | None] = croniter(
dag.timetable._expression,
start_time=last_automated_data_interval.end,
ret_type=datetime.datetime,
)
for dt in dates_iter:
if dt is None:
break
if dt.year != year:
break
if dag.end_date and dt > dag.end_date:
break
dates[dt.date()] += 1
else:
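                # Generic timetables: step run-by-run via next_dagrun_info() until the year boundary
                # is crossed or no further progress is made.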
prev_logical_date = DateTime.min
while True:
curr_info = dag.timetable.next_dagrun_info(
last_automated_data_interval=last_automated_data_interval,
restriction=restriction,
)
if curr_info is None:
break # Reached the end.
if curr_info.logical_date <= prev_logical_date:
break # We're not progressing. Maybe a malformed timetable? Give up.
if curr_info.logical_date.year != year:
break # Crossed the year boundary.
last_automated_data_interval = curr_info.data_interval
dates[curr_info.logical_date] += 1
prev_logical_date = curr_info.logical_date
data_dag_states.extend(
{"date": date.isoformat(), "state": "planned", "count": count}
for (date, count) in dates.items()
)
now = DateTime.utcnow()
data = {
"dag_states": data_dag_states,
"start_date": (dag.start_date or now).date().isoformat(),
"end_date": (dag.end_date or now).date().isoformat(),
}
return self.render_template(
"airflow/calendar.html",
dag=dag,
doc_md=wwwutils.wrapped_markdown(getattr(dag, "doc_md", None)),
data=htmlsafe_json_dumps(data, separators=(",", ":")), # Avoid spaces to reduce payload size.
root=root,
dag_model=dag_model,
)
@expose("/graph")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
def legacy_graph(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.graph", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/graph")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
@provide_session
def graph(self, dag_id: str, session: Session = NEW_SESSION):
"""Get DAG as Graph."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
root = request.args.get("root")
if root:
filter_upstream = request.args.get("filter_upstream") == "true"
filter_downstream = request.args.get("filter_downstream") == "true"
dag = dag.partial_subset(
task_ids_or_regex=root, include_upstream=filter_upstream, include_downstream=filter_downstream
)
arrange = request.args.get("arrange", dag.orientation)
nodes = task_group_to_dict(dag.task_group)
edges = dag_edges(dag)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data["arrange"] = arrange
dttm = dt_nr_dr_data["dttm"]
dag_run = dag.get_dagrun(execution_date=dttm)
dag_run_id = dag_run.run_id if dag_run else None
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
"""Graph Form class."""
arrange = SelectField(
"Layout",
choices=(
("LR", "Left > Right"),
("RL", "Right > Left"),
("TB", "Top > Bottom"),
("BT", "Bottom > Top"),
),
)
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data["dr_choices"]
task_instances = {}
for ti in dag.get_task_instances(dttm, dttm):
if ti.task_id not in task_instances:
task_instances[ti.task_id] = wwwutils.get_instance_with_map(ti, session)
# Need to add operator_name explicitly because it's not a column in task_instances model.
task_instances[ti.task_id]["operator_name"] = ti.operator_name
tasks = {
t.task_id: {
"dag_id": t.dag_id,
"task_type": t.task_type,
"operator_name": t.operator_name,
"extra_links": t.extra_links,
"is_mapped": isinstance(t, MappedOperator),
"trigger_rule": t.trigger_rule,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = wwwutils.wrapped_markdown(getattr(dag, "doc_md", None))
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
state_priority = ["no_status" if p is None else p for p in wwwutils.priority]
return self.render_template(
"airflow/graph.html",
dag=dag,
form=form,
dag_run_id=dag_run_id,
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data["dr_state"]),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
{op.operator_name: op for op in dag.tasks}.values(), key=lambda x: x.operator_name
),
root=root or "",
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
dag_run_state=dt_nr_dr_data["dr_state"],
dag_model=dag_model,
auto_refresh_interval=conf.getint("webserver", "auto_refresh_interval"),
state_priority=state_priority,
)
@expose("/duration")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def legacy_duration(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.duration", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/duration")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def duration(self, dag_id: str, session: Session = NEW_SESSION):
"""Get Dag as duration graph."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
default_dag_run = conf.getint("webserver", "default_dag_run_display_number")
base_date_str = request.args.get("base_date")
num_runs = request.args.get("num_runs", default=default_dag_run, type=int)
if base_date_str:
base_date = _safe_parse_datetime(base_date_str)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart",
x_custom_format=True,
x_axis_date=True,
x_axis_format=LINECHART_X_AXIS_TICKFORMAT,
height=chart_height,
chart_attr=self.line_chart_attr,
)
cum_chart = nvd3.lineChart(
name="cumLineChart",
x_custom_format=True,
x_axis_date=True,
x_axis_format=LINECHART_X_AXIS_TICKFORMAT,
height=chart_height,
chart_attr=self.line_chart_attr,
)
y_points = defaultdict(list)
x_points = defaultdict(list)
task_instances = dag.get_task_instances_before(base_date, num_runs, session=session)
if task_instances:
min_date = task_instances[0].execution_date
else:
min_date = timezone.utc_epoch()
ti_fails = (
select(TaskFail)
.join(TaskFail.dag_run)
.where(
TaskFail.dag_id == dag.dag_id,
DagRun.execution_date >= min_date,
DagRun.execution_date <= base_date,
)
)
if dag.partial:
ti_fails = ti_fails.where(TaskFail.task_id.in_([t.task_id for t in dag.tasks]))
ti_fails = session.scalars(ti_fails)
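        # Sum the duration of failed attempts per (dag_id, task_id, run_id) so it can be added on
        # top of the matching task instance's duration below.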
fails_totals: dict[tuple[str, str, str], int] = defaultdict(int)
for failed_task_instance in ti_fails:
dict_key = (
failed_task_instance.dag_id,
failed_task_instance.task_id,
failed_task_instance.run_id,
)
if failed_task_instance.duration:
fails_totals[dict_key] += failed_task_instance.duration
# we must group any mapped TIs by dag_id, task_id, run_id
mapped_tis = set()
tis_grouped = itertools.groupby(task_instances, lambda x: (x.dag_id, x.task_id, x.run_id))
for _, group in tis_grouped:
tis = list(group)
duration = sum(x.duration for x in tis if x.duration)
if duration:
first_ti = tis[0]
if first_ti.map_index >= 0:
mapped_tis.add(first_ti.task_id)
date_time = wwwutils.epoch(first_ti.execution_date)
x_points[first_ti.task_id].append(date_time)
fails_dict_key = (first_ti.dag_id, first_ti.task_id, first_ti.run_id)
fails_total = fails_totals[fails_dict_key]
y_points[first_ti.task_id].append(float(duration + fails_total))
cumulative_y = {k: list(itertools.accumulate(v)) for k, v in y_points.items()}
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cumulative_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis("yAxis", format=".02f", custom_format=False, label=f"Duration ({y_unit})")
chart.axislist["yAxis"]["axisLabelDistance"] = "-15"
cum_chart.create_y_axis("yAxis", format=".02f", custom_format=False, label=f"Duration ({cum_y_unit})")
cum_chart.axislist["yAxis"]["axisLabelDistance"] = "-15"
for task_id in x_points:
chart.add_serie(
name=task_id + "[]" if task_id in mapped_tis else task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
cum_chart.add_serie(
name=task_id + "[]" if task_id in mapped_tis else task_id,
x=x_points[task_id],
y=scale_time_units(cumulative_y[task_id], cum_y_unit),
)
dates = sorted({ti.execution_date for ti in task_instances})
max_date = max(ti.execution_date for ti in task_instances) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(
data={
"base_date": max_date or timezone.utcnow(),
"num_runs": num_runs,
}
)
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind("});")
cum_chart.htmlcontent = (
f"{cum_chart.htmlcontent[:s_index]}"
"$( document ).trigger('chartload')"
f"{cum_chart.htmlcontent[s_index:]}"
)
return self.render_template(
"airflow/duration_chart.html",
dag=dag,
root=root,
form=form,
chart=Markup(chart.htmlcontent),
cum_chart=Markup(cum_chart.htmlcontent),
dag_model=dag_model,
)
@expose("/tries")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def legacy_tries(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.tries", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/tries")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def tries(self, dag_id: str, session: Session = NEW_SESSION):
"""Shows all tries."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
default_dag_run = conf.getint("webserver", "default_dag_run_display_number")
base_date_str = request.args.get("base_date")
num_runs = request.args.get("num_runs", default=default_dag_run, type=int)
if base_date_str:
base_date = _safe_parse_datetime(base_date_str)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart",
x_custom_format=True,
x_axis_date=True,
x_axis_format=LINECHART_X_AXIS_TICKFORMAT,
height=chart_height,
chart_attr=self.line_chart_attr,
)
tis = dag.get_task_instances_before(base_date, num_runs, session=session)
for task in dag.tasks:
y_points = []
x_points = []
for ti in tis:
if ti.task_id != task.task_id:
continue
dttm = wwwutils.epoch(ti.execution_date)
x_points.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y_points.append(ti.prev_attempted_tries)
if x_points:
chart.add_serie(name=task.task_id, x=x_points, y=y_points)
tries = sorted({ti.try_number for ti in tis})
max_date = max(ti.execution_date for ti in tis) if tries else None
chart.create_y_axis("yAxis", format=".02f", custom_format=False, label="Tries")
chart.axislist["yAxis"]["axisLabelDistance"] = "-15"
session.commit()
form = DateTimeWithNumRunsForm(
data={
"base_date": max_date or timezone.utcnow(),
"num_runs": num_runs,
}
)
chart.buildcontent()
return self.render_template(
"airflow/chart.html",
dag=dag,
root=root,
form=form,
chart=Markup(chart.htmlcontent),
tab_title="Tries",
dag_model=dag_model,
)
@expose("/landing_times")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def legacy_landing_times(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.landing_times", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/landing-times")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def landing_times(self, dag_id: str, session: Session = NEW_SESSION):
"""Shows landing times."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
default_dag_run = conf.getint("webserver", "default_dag_run_display_number")
base_date_str = request.args.get("base_date")
num_runs = request.args.get("num_runs", default=default_dag_run, type=int)
if base_date_str:
base_date = _safe_parse_datetime(base_date_str)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
tis = dag.get_task_instances_before(base_date, num_runs, session=session)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart",
x_custom_format=True,
x_axis_date=True,
x_axis_format=LINECHART_X_AXIS_TICKFORMAT,
height=chart_height,
chart_attr=self.line_chart_attr,
)
y_points: dict[str, list[float]] = collections.defaultdict(list)
x_points: dict[str, list[tuple[int]]] = collections.defaultdict(list)
for task in dag.tasks:
task_id = task.task_id
for ti in tis:
if ti.task_id != task.task_id:
continue
ts = dag.get_run_data_interval(ti.dag_run).end
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x_points[task_id].append(dttm)
y_points[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis("yAxis", format=".02f", custom_format=False, label=f"Landing Time ({y_unit})")
chart.axislist["yAxis"]["axisLabelDistance"] = "-15"
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
max_date = max(ti.execution_date for ti in tis) if tis else None
session.commit()
form = DateTimeWithNumRunsForm(
data={
"base_date": max_date or timezone.utcnow(),
"num_runs": num_runs,
}
)
chart.buildcontent()
return self.render_template(
"airflow/chart.html",
dag=dag,
chart=Markup(chart.htmlcontent),
height=f"{chart_height + 100}px",
root=root,
form=form,
tab_title="Landing times",
dag_model=dag_model,
)
@expose("/paused", methods=["POST"])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def paused(self):
"""Toggle paused."""
dag_id = request.args.get("dag_id")
is_paused = request.args.get("is_paused") == "false"
models.DagModel.get_dagmodel(dag_id).set_is_paused(is_paused=is_paused)
return "OK"
@expose("/gantt")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def legacy_gantt(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.gantt", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/gantt")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def gantt(self, dag_id: str, session: Session = NEW_SESSION):
"""Show GANTT chart."""
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for("Airflow.index"))
wwwutils.check_import_errors(dag.fileloc, session)
wwwutils.check_dag_warnings(dag.dag_id, session)
root = request.args.get("root")
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data["dttm"]
dag_run = dag.get_dagrun(execution_date=dttm)
dag_run_id = dag_run.run_id if dag_run else None
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data["dr_choices"]
tis = session.scalars(
select(TaskInstance)
.where(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == dag_run_id,
TaskInstance.start_date.is_not(None),
TaskInstance.state.is_not(None),
)
.order_by(TaskInstance.start_date)
).all()
ti_fails = select(TaskFail).filter_by(run_id=dag_run_id, dag_id=dag_id)
if dag.partial:
ti_fails = ti_fails.where(TaskFail.task_id.in_([t.task_id for t in dag.tasks]))
ti_fails = session.scalars(ti_fails)
tasks = []
for ti in tis:
if not dag.has_task(ti.task_id):
continue
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries if ti.prev_attempted_tries != 0 else ti.try_number
task_dict = alchemy_to_dict(ti) or {}
task_dict["end_date"] = task_dict["end_date"] or timezone.utcnow()
task_dict["extraLinks"] = dag.get_task(ti.task_id).extra_links
task_dict["try_number"] = try_count
task_dict["execution_date"] = dttm.isoformat()
task_dict["run_id"] = dag_run_id
tasks.append(task_dict)
tf_count = 0
try_count = 1
prev_task_id = ""
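        # Render each failed attempt as its own bar, numbering consecutive failures of the same
        # task as successive tries.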
for failed_task_instance in ti_fails:
if not dag.has_task(failed_task_instance.task_id):
continue
if tf_count != 0 and failed_task_instance.task_id == prev_task_id:
try_count += 1
else:
try_count = 1
prev_task_id = failed_task_instance.task_id
tf_count += 1
task = dag.get_task(failed_task_instance.task_id)
task_dict = alchemy_to_dict(failed_task_instance) or {}
end_date = task_dict["end_date"] or timezone.utcnow()
task_dict["end_date"] = end_date
task_dict["start_date"] = task_dict["start_date"] or end_date
task_dict["state"] = TaskInstanceState.FAILED
task_dict["operator"] = task.operator_name
task_dict["try_number"] = try_count
task_dict["extraLinks"] = task.extra_links
task_dict["execution_date"] = dttm.isoformat()
task_dict["run_id"] = dag_run_id
tasks.append(task_dict)
task_names = [ti.task_id for ti in tis]
data = {
"taskNames": task_names,
"tasks": tasks,
"height": len(task_names) * 25 + 25,
}
session.commit()
return self.render_template(
"airflow/gantt.html",
dag=dag,
dag_run_id=dag_run_id,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date="",
root=root,
dag_model=dag_model,
)
@expose("/extra_links")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def extra_links(self, *, session: Session = NEW_SESSION):
"""
A restful endpoint that returns external links for a given Operator.
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get("dag_id")
task_id = request.args.get("task_id")
map_index = request.args.get("map_index", -1, type=int)
execution_date = request.args.get("execution_date")
dttm = _safe_parse_datetime(execution_date)
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
return {"url": None, "error": f"can't find dag {dag} or task_id {task_id}"}, 404
task: AbstractOperator = dag.get_task(task_id)
link_name = request.args.get("link_name")
if link_name is None:
return {"url": None, "error": "Link name not passed"}, 400
ti = session.scalar(
select(TaskInstance)
.filter_by(dag_id=dag_id, task_id=task_id, execution_date=dttm, map_index=map_index)
.options(joinedload(TaskInstance.dag_run))
.limit(1)
)
if not ti:
return {"url": None, "error": "Task Instances not found"}, 404
try:
url = task.get_extra_links(ti, link_name)
except ValueError as err:
return {"url": None, "error": str(err)}, 404
if url:
return {"error": None, "url": url}
else:
return {"url": None, "error": f"No URL found for {link_name}"}, 404
@expose("/object/graph_data")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
@provide_session
def graph_data(self, session: Session = NEW_SESSION):
"""Get Graph Data."""
dag_id = request.args.get("dag_id")
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
root = request.args.get("root")
if root:
filter_upstream = request.args.get("filter_upstream") == "true"
filter_downstream = request.args.get("filter_downstream") == "true"
dag = dag.partial_subset(
task_ids_or_regex=root, include_upstream=filter_upstream, include_downstream=filter_downstream
)
nodes = task_group_to_dict(dag.task_group)
edges = dag_edges(dag)
data = {
"arrange": dag.orientation,
"nodes": nodes,
"edges": edges,
}
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/object/task_instances")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task_instances(self):
"""Shows task instances."""
dag_id = request.args.get("dag_id")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dttm = request.args.get("execution_date")
if dttm:
dttm = _safe_parse_datetime(dttm)
else:
return {"error": f"Invalid execution_date {dttm}"}, 400
with create_session() as session:
task_instances = {
ti.task_id: wwwutils.get_instance_with_map(ti, session)
for ti in dag.get_task_instances(dttm, dttm)
}
return flask.json.jsonify(task_instances)
@expose("/object/grid_data")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
def grid_data(self):
"""Returns grid data."""
dag_id = request.args.get("dag_id")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
return {"error": f"can't find dag {dag_id}"}, 404
root = request.args.get("root")
if root:
filter_upstream = request.args.get("filter_upstream") == "true"
filter_downstream = request.args.get("filter_downstream") == "true"
dag = dag.partial_subset(
task_ids_or_regex=root, include_upstream=filter_upstream, include_downstream=filter_downstream
)
num_runs = request.args.get("num_runs", type=int)
if num_runs is None:
num_runs = conf.getint("webserver", "default_dag_run_display_number")
try:
base_date = timezone.parse(request.args["base_date"])
except (KeyError, ValueError):
base_date = dag.get_latest_execution_date() or timezone.utcnow()
with create_session() as session:
query = select(DagRun).where(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
run_type = request.args.get("run_type")
if run_type:
query = query.where(DagRun.run_type == run_type)
run_state = request.args.get("run_state")
if run_state:
query = query.where(DagRun.state == run_state)
dag_runs = wwwutils.sorted_dag_runs(
query, ordering=dag.timetable.run_ordering, limit=num_runs, session=session
)
encoded_runs = [wwwutils.encode_dag_run(dr, json_encoder=utils_json.WebEncoder) for dr in dag_runs]
data = {
"groups": dag_to_grid(dag, dag_runs, session),
"dag_runs": encoded_runs,
"ordering": dag.timetable.run_ordering,
}
# avoid spaces to reduce payload size
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/object/historical_metrics_data")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
]
)
def historical_metrics_data(self):
"""Returns cluster activity historical metrics."""
start_date = _safe_parse_datetime(request.args.get("start_date"))
end_date = _safe_parse_datetime(request.args.get("end_date"))
with create_session() as session:
# DagRuns
dag_runs_type = session.execute(
select(DagRun.run_type, func.count(DagRun.run_id))
.where(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(DagRun.run_type)
).all()
dag_run_states = session.execute(
select(DagRun.state, func.count(DagRun.run_id))
.where(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(DagRun.state)
).all()
# TaskInstances
task_instance_states = session.execute(
select(TaskInstance.state, func.count(TaskInstance.run_id))
.join(TaskInstance.dag_run)
.where(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(TaskInstance.state)
).all()
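        # Zero-fill every known run type and state, then overlay the actual counts from the queries.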
data = {
"dag_run_types": {
**{dag_run_type.value: 0 for dag_run_type in DagRunType},
**{run_type: sum_value for run_type, sum_value in dag_runs_type},
},
"dag_run_states": {
**{dag_run_state.value: 0 for dag_run_state in DagRunState},
**{run_state: sum_value for run_state, sum_value in dag_run_states},
},
"task_instance_states": {
"no_status": 0,
**{ti_state.value: 0 for ti_state in TaskInstanceState},
**{ti_state or "no_status": sum_value for ti_state, sum_value in task_instance_states},
},
}
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/object/next_run_datasets/<string:dag_id>")
@auth.has_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
def next_run_datasets(self, dag_id):
"""Returns datasets necessary, and their status, for the next dag run."""
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
return {"error": f"can't find dag {dag_id}"}, 404
with create_session() as session:
data = [
dict(info)
for info in session.execute(
select(
DatasetModel.id,
DatasetModel.uri,
func.max(DatasetEvent.timestamp).label("lastUpdate"),
)
.join(
DagScheduleDatasetReference, DagScheduleDatasetReference.dataset_id == DatasetModel.id
)
.join(
DatasetDagRunQueue,
and_(
DatasetDagRunQueue.dataset_id == DatasetModel.id,
DatasetDagRunQueue.target_dag_id == DagScheduleDatasetReference.dag_id,
),
isouter=True,
)
.join(
DatasetEvent,
DatasetEvent.dataset_id == DatasetModel.id,
isouter=True,
)
.where(DagScheduleDatasetReference.dag_id == dag_id, ~DatasetModel.is_orphaned)
.group_by(DatasetModel.id, DatasetModel.uri)
.order_by(DatasetModel.uri)
)
]
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/object/dataset_dependencies")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
]
)
def dataset_dependencies(self):
"""Returns dataset dependencies graph."""
nodes_dict: dict[str, Any] = {}
        edge_tuples: set[tuple[str, str]] = set()
for dag, dependencies in SerializedDagModel.get_dag_dependencies().items():
dag_node_id = f"dag:{dag}"
if dag_node_id not in nodes_dict and len(dependencies) > 0:
for dep in dependencies:
if dep.dependency_type == "dag" or dep.dependency_type == "dataset":
nodes_dict[dag_node_id] = node_dict(dag_node_id, dag, "dag")
if dep.node_id not in nodes_dict:
nodes_dict[dep.node_id] = node_dict(
dep.node_id, dep.dependency_id, dep.dependency_type
)
if dep.source != "dataset":
edge_tuples.add((f"dag:{dep.source}", dep.node_id))
if dep.target != "dataset":
edge_tuples.add((dep.node_id, f"dag:{dep.target}"))
nodes = list(nodes_dict.values())
edges = [{"source": source, "target": target} for source, target in edge_tuples]
data = {
"nodes": nodes,
"edges": edges,
}
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/object/datasets_summary")
@auth.has_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET)])
def datasets_summary(self):
"""Get a summary of datasets, including the datetime they were last updated and how many updates
they've ever had.
"""
allowed_attrs = ["uri", "last_dataset_update"]
# Grab query parameters
limit = int(request.args.get("limit", 25))
offset = int(request.args.get("offset", 0))
order_by = request.args.get("order_by", "uri")
uri_pattern = request.args.get("uri_pattern", "")
lstripped_orderby = order_by.lstrip("-")
updated_after = _safe_parse_datetime(request.args.get("updated_after"), allow_empty=True)
updated_before = _safe_parse_datetime(request.args.get("updated_before"), allow_empty=True)
# Check and clean up query parameters
limit = 50 if limit > 50 else limit
uri_pattern = uri_pattern[:4000]
if lstripped_orderby not in allowed_attrs:
return {
"detail": (
f"Ordering with '{lstripped_orderby}' is disallowed or the attribute does not "
"exist on the model"
)
}, 400
with create_session() as session:
if lstripped_orderby == "uri":
if order_by.startswith("-"):
order_by = (DatasetModel.uri.desc(),)
else:
order_by = (DatasetModel.uri.asc(),)
elif lstripped_orderby == "last_dataset_update":
if order_by.startswith("-"):
order_by = (
func.max(DatasetEvent.timestamp).desc(),
DatasetModel.uri.asc(),
)
if session.bind.dialect.name == "postgresql":
order_by = (order_by[0].nulls_last(), *order_by[1:])
else:
order_by = (
func.max(DatasetEvent.timestamp).asc(),
DatasetModel.uri.desc(),
)
if session.bind.dialect.name == "postgresql":
order_by = (order_by[0].nulls_first(), *order_by[1:])
count_query = select(func.count(DatasetModel.id))
has_event_filters = bool(updated_before or updated_after)
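            # Outer-join dataset events unless filtering on event timestamps, so datasets without
            # any events still appear (with zero updates).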
query = (
select(
DatasetModel.id,
DatasetModel.uri,
func.max(DatasetEvent.timestamp).label("last_dataset_update"),
func.sum(case((DatasetEvent.id.is_not(None), 1), else_=0)).label("total_updates"),
)
.join(DatasetEvent, DatasetEvent.dataset_id == DatasetModel.id, isouter=not has_event_filters)
.group_by(
DatasetModel.id,
DatasetModel.uri,
)
.order_by(*order_by)
)
if has_event_filters:
count_query = count_query.join(DatasetEvent, DatasetEvent.dataset_id == DatasetModel.id)
filters = [~DatasetModel.is_orphaned]
if uri_pattern:
filters.append(DatasetModel.uri.ilike(f"%{uri_pattern}%"))
if updated_after:
filters.append(DatasetEvent.timestamp >= updated_after)
if updated_before:
filters.append(DatasetEvent.timestamp <= updated_before)
query = query.where(*filters).offset(offset).limit(limit)
count_query = count_query.where(*filters)
query = session.execute(query)
datasets = [dict(dataset) for dataset in query]
data = {"datasets": datasets, "total_entries": session.scalar(count_query)}
return (
htmlsafe_json_dumps(data, separators=(",", ":"), cls=utils_json.WebEncoder),
{"Content-Type": "application/json; charset=utf-8"},
)
@expose("/robots.txt")
@action_logging
def robots(self):
"""
        Returns a robots.txt file for blocking certain search engine crawlers. This mitigates some
        of the risk associated with exposing Airflow to the public internet; however, it does not
        address the real security risks associated with such a deployment.
"""
return send_from_directory(get_airflow_app().static_folder, "robots.txt")
@expose("/audit_log")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
]
)
def legacy_audit_log(self):
"""Redirect from url param."""
return redirect(url_for("Airflow.audit_log", **sanitize_args(request.args)))
@expose("/dags/<string:dag_id>/audit_log")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
]
)
@provide_session
def audit_log(self, dag_id: str, session: Session = NEW_SESSION):
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
dag_model = DagModel.get_dagmodel(dag_id, session=session)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
return redirect(url_for("Airflow.index"))
included_events_raw = conf.get("webserver", "audit_view_included_events", fallback=None)
excluded_events_raw = conf.get("webserver", "audit_view_excluded_events", fallback=None)
query = select(Log).where(Log.dag_id == dag_id)
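        # "audit_view_included_events" takes precedence over "audit_view_excluded_events";
        # only one of the two filters is applied to the query.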
if included_events_raw:
included_events = {event.strip() for event in included_events_raw.split(",")}
query = query.where(Log.event.in_(included_events))
elif excluded_events_raw:
excluded_events = {event.strip() for event in excluded_events_raw.split(",")}
query = query.where(Log.event.notin_(excluded_events))
current_page = request.args.get("page", default=0, type=int)
arg_sorting_key = request.args.get("sorting_key", "dttm")
arg_sorting_direction = request.args.get("sorting_direction", default="desc")
logs_per_page = PAGE_SIZE
audit_logs_count = session.scalar(select(func.count()).select_from(query))
num_of_pages = int(math.ceil(audit_logs_count / float(logs_per_page)))
start = current_page * logs_per_page
end = start + logs_per_page
sort_column = Log.__table__.c.get(arg_sorting_key)
if sort_column is not None:
if arg_sorting_direction == "desc":
sort_column = sort_column.desc()
query = query.order_by(sort_column)
dag_audit_logs = session.scalars(query.offset(start).limit(logs_per_page)).all()
return self.render_template(
"airflow/dag_audit_log.html",
dag=dag,
dag_model=dag_model,
root=request.args.get("root"),
dag_id=dag_id,
dag_logs=dag_audit_logs,
num_log_from=min(start + 1, audit_logs_count),
num_log_to=min(end, audit_logs_count),
audit_logs_count=audit_logs_count,
page_size=PAGE_SIZE,
paging=wwwutils.generate_pages(
current_page,
num_of_pages,
sorting_key=arg_sorting_key if arg_sorting_key else None,
sorting_direction=arg_sorting_direction if arg_sorting_direction else None,
),
sorting_key=arg_sorting_key,
sorting_direction=arg_sorting_direction,
)
class ConfigurationView(AirflowBaseView):
"""View to show Airflow Configurations."""
default_view = "conf"
class_permission_name = permissions.RESOURCE_CONFIG
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
@expose("/configuration")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
]
)
def conf(self):
"""Shows configuration."""
raw = request.args.get("raw") == "true"
title = "Airflow Configuration"
expose_config = conf.get("webserver", "expose_config").lower()
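        # expose_config is either a boolean-like string or "non-sensitive-only"; the latter
        # renders the configuration with sensitive values masked.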
# TODO remove "if raw" usage in Airflow 3.0. Configuration can be fetched via the REST API.
if raw:
if expose_config == "non-sensitive-only":
updater = configupdater.ConfigUpdater()
updater.read(AIRFLOW_CONFIG)
for sect, key in conf.sensitive_config_values:
if updater.has_option(sect, key):
updater[sect][key].value = "< hidden >"
config = str(updater)
elif expose_config in {"true", "t", "1"}:
with open(AIRFLOW_CONFIG) as file:
config = file.read()
else:
config = (
"# Your Airflow administrator chose not to expose the configuration, "
"most likely for security reasons."
)
return Response(
response=config,
status=200,
mimetype="application/text",
headers={"Deprecation": "Endpoint will be removed in Airflow 3.0, use the REST API instead."},
)
if expose_config in {"non-sensitive-only", "true", "t", "1"}:
display_sensitive = expose_config != "non-sensitive-only"
table = [
(section, key, str(value), source)
for section, parameters in conf.as_dict(True, display_sensitive).items()
for key, (value, source) in parameters.items()
]
return self.render_template(
template="airflow/config.html",
title=title,
table=table,
)
else:
return self.render_template(
"airflow/config.html",
title=title,
hide_config_msg=(
"Your Airflow administrator chose not to expose the configuration, "
"most likely for security reasons."
),
)
class RedocView(AirflowBaseView):
"""Redoc Open API documentation."""
default_view = "redoc"
@expose("/redoc")
def redoc(self):
"""Redoc API documentation."""
openapi_spec_url = url_for("/api/v1./api/v1_openapi_yaml")
return self.render_template("airflow/redoc.html", openapi_spec_url=openapi_spec_url)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
"""Filter using DagIDs."""
def apply(self, query, func):
if get_airflow_app().appbuilder.sm.has_all_dags_access(g.user):
return query
filter_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
return query.where(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView):
"""Airflow Mode View.
Overridden `__getattribute__` to wraps REST methods with action_logger
"""
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
def __getattribute__(self, attr):
"""Wraps action REST methods with `action_logging` wrapper
Overriding enables differentiating resource and generation of event name at the decorator level.
if attr in ["show", "list", "read", "get", "get_list"]:
return action_logging(event="RESOURCE_NAME"."action_name")(attr)
else:
return attr
"""
attribute = object.__getattribute__(self, attr)
if (
callable(attribute)
and hasattr(attribute, "_permission_name")
and attribute._permission_name in self.method_permission_name
):
permission_str = self.method_permission_name[attribute._permission_name]
if permission_str not in ["show", "list", "read", "get", "get_list"]:
return action_logging(event=f"{self.route_base.strip('/')}.{permission_str}")(attribute)
return attribute
class AirflowPrivilegeVerifierModelView(AirflowModelView):
"""
    This ModelView prevents the ability to pass primary keys of objects relating to DAGs you
    shouldn't be able to edit. This only holds for the add, update, and delete operations.
    You will still need to use ``action_has_dag_edit_access()`` for actions.
"""
@staticmethod
def validate_dag_edit_access(item: DagRun | TaskInstance):
"""Validates whether the user has 'can_edit' access for this specific DAG."""
if not get_airflow_app().appbuilder.sm.can_edit_dag(item.dag_id):
raise AirflowException(f"Access denied for dag_id {item.dag_id}")
def pre_add(self, item: DagRun | TaskInstance):
self.validate_dag_edit_access(item)
def pre_update(self, item: DagRun | TaskInstance):
self.validate_dag_edit_access(item)
def pre_delete(self, item: DagRun | TaskInstance):
self.validate_dag_edit_access(item)
def post_add_redirect(self): # Required to prevent redirect loop
return redirect(self.get_default_url())
def post_edit_redirect(self): # Required to prevent redirect loop
return redirect(self.get_default_url())
def post_delete_redirect(self): # Required to prevent redirect loop
return redirect(self.get_default_url())
def action_has_dag_edit_access(action_func: Callable) -> Callable:
"""Decorator for actions which verifies you have DAG edit access on the given tis/drs."""
@wraps(action_func)
def check_dag_edit_acl_for_actions(
self,
items: list[TaskInstance] | list[DagRun] | TaskInstance | DagRun | None,
*args,
**kwargs,
) -> Callable:
if items is None:
dag_ids: set[str] = set()
elif isinstance(items, list):
dag_ids = {item.dag_id for item in items if item is not None}
        elif isinstance(items, (TaskInstance, DagRun)):
dag_ids = {items.dag_id}
else:
raise ValueError(
"Was expecting the first argument of the action to be of type "
"list[TaskInstance] | list[DagRun] | TaskInstance | DagRun | None."
f"Was of type: {type(items)}"
)
for dag_id in dag_ids:
if not get_airflow_app().appbuilder.sm.can_edit_dag(dag_id):
flash(f"Access denied for dag_id {dag_id}", "danger")
logging.warning("User %s tried to modify %s without having access.", g.user.username, dag_id)
return redirect(self.get_default_url())
return action_func(self, items, *args, **kwargs)
return check_dag_edit_acl_for_actions
class SlaMissModelView(AirflowModelView):
"""View to show SlaMiss table."""
route_base = "/slamiss"
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss) # type: ignore
class_permission_name = permissions.RESOURCE_SLA_MISS
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ["dag_id", "task_id", "execution_date", "email_sent", "notification_sent", "timestamp"]
label_columns = {
"execution_date": "Logical Date",
}
add_columns = ["dag_id", "task_id", "execution_date", "email_sent", "notification_sent", "timestamp"]
edit_columns = ["dag_id", "task_id", "execution_date", "email_sent", "notification_sent", "timestamp"]
search_columns = ["dag_id", "task_id", "email_sent", "notification_sent", "timestamp", "execution_date"]
base_order = ("execution_date", "desc")
base_filters = [["dag_id", DagFilter, list]]
formatters_columns = {
"task_id": wwwutils.task_instance_link,
"execution_date": wwwutils.datetime_f("execution_date"),
"timestamp": wwwutils.datetime_f("timestamp"),
"dag_id": wwwutils.dag_link,
"map_index": wwwutils.format_map_index,
}
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete action."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action(
"mulnotificationsent",
"Set notification sent to true",
"Are you sure you want to set all these notifications to sent?",
single=False,
)
def action_mulnotificationsent(self, items: list[SlaMiss]):
return self._set_notification_property(items, "notification_sent", True)
@action(
"mulnotificationsentfalse",
"Set notification sent to false",
"Are you sure you want to mark these SLA alerts as notification not sent yet?",
single=False,
)
def action_mulnotificationsentfalse(self, items: list[SlaMiss]):
return self._set_notification_property(items, "notification_sent", False)
@action(
"mulemailsent",
"Set email sent to true",
"Are you sure you want to mark these SLA alerts as emails were sent?",
single=False,
)
def action_mulemailsent(self, items: list[SlaMiss]):
return self._set_notification_property(items, "email_sent", True)
@action(
"mulemailsentfalse",
"Set email sent to false",
"Are you sure you want to mark these SLA alerts as emails not sent yet?",
single=False,
)
def action_mulemailsentfalse(self, items: list[SlaMiss]):
return self._set_notification_property(items, "email_sent", False)
@provide_session
def _set_notification_property(
self,
items: list[SlaMiss],
attr: str,
new_value: bool,
session: Session = NEW_SESSION,
):
try:
count = 0
for sla in items:
count += 1
setattr(sla, attr, new_value)
session.merge(sla)
session.commit()
flash(f"{count} SLAMisses had {attr} set to {new_value}.")
except Exception as ex:
flash(str(ex), "error")
flash("Failed to set state", "error")
self.update_redirect()
return redirect(self.get_default_url())
class XComModelView(AirflowModelView):
"""View to show records from XCom table."""
route_base = "/xcom"
list_title = "List XComs"
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
class_permission_name = permissions.RESOURCE_XCOM
method_permission_name = {
"list": "read",
"delete": "delete",
"action_muldelete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
search_columns = ["key", "value", "timestamp", "dag_id", "task_id", "run_id", "execution_date"]
list_columns = ["key", "value", "timestamp", "dag_id", "task_id", "run_id", "map_index", "execution_date"]
base_order = ("dag_run_id", "desc")
base_filters = [["dag_id", DagFilter, list]]
formatters_columns = {
"task_id": wwwutils.task_instance_link,
"timestamp": wwwutils.datetime_f("timestamp"),
"dag_id": wwwutils.dag_link,
"map_index": wwwutils.format_map_index,
"execution_date": wwwutils.datetime_f("execution_date"),
}
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete action."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pre_add(self, item):
"""Pre add hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(
value=item.value,
key=item.key,
task_id=item.task_id,
dag_id=item.dag_id,
run_id=item.run_id,
map_index=item.map_index,
)
def pre_update(self, item):
"""Pre update hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(
value=item.value,
key=item.key,
task_id=item.task_id,
dag_id=item.dag_id,
run_id=item.run_id,
map_index=item.map_index,
)
# Used to store a dictionary of field behaviours used to dynamically change the available
# fields in ConnectionForm based on the type of connection chosen.
# See airflow.hooks.base_hook.DiscoverableHook for details on how to customize your Hooks.
#
# Additionally, a list of connection types that support testing via Airflow REST API is stored to dynamically
# enable/disable the Test Connection button.
#
# These field behaviours and testable connection types are rendered as scripts in the conn_create.html and
# conn_edit.html templates.
class ConnectionFormWidget(FormWidget):
"""Form widget used to display connection."""
@cached_property
def field_behaviours(self) -> str:
return json.dumps(ProvidersManager().field_behaviours)
@cached_property
def testable_connection_types(self) -> list[str]:
return [
connection_type
for connection_type, hook_info in ProvidersManager().hooks.items()
if hook_info and hook_info.connection_testable
]
class ConnectionFormProxy:
"""A stand-in for the connection form class.
Flask-Appbuilder model views only ever call the ``refresh()`` function on
the form class, so this is the perfect place to make the form generation
dynamic. See docstring of ``create_connection_form_class`` for rationales.
"""
@staticmethod
def refresh(obj=None):
return create_connection_form_class().refresh(obj)
class ConnectionModelView(AirflowModelView):
"""View to show records from Connections table."""
route_base = "/connection"
datamodel = AirflowModelView.CustomSQLAInterface(Connection) # type: ignore
class_permission_name = permissions.RESOURCE_CONNECTION
method_permission_name = {
"add": "create",
"list": "read",
"edit": "edit",
"delete": "delete",
"action_muldelete": "delete",
"action_mulduplicate": "create",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
"conn_id",
"conn_type",
"description",
"host",
"port",
"is_encrypted",
"is_extra_encrypted",
]
# The real add_columns and edit_columns are dynamically generated at runtime
# so we can delay calculating entries relying on providers to make webserver
# start up faster.
_add_columns = _edit_columns = [
"conn_id",
"conn_type",
"description",
"host",
"schema",
"login",
"password",
"port",
"extra",
]
# We will generate the actual ConnectionForm when it is actually needed,
# i.e. when the web form views are displayed and submitted.
add_form = edit_form = ConnectionFormProxy
add_template = "airflow/conn_create.html"
edit_template = "airflow/conn_edit.html"
add_widget = ConnectionFormWidget
edit_widget = ConnectionFormWidget
base_order = ("conn_id", "asc")
def _iter_extra_field_names_and_sensitivity(self) -> Iterator[tuple[str, str, bool]]:
"""Iterate through provider-backed connection fields.
Note that this cannot be a property (including a cached property)
because Flask-Appbuilder attempts to access all members on startup, and
using a property would initialize the providers manager too eagerly.
Returns tuple of:
* key
* field_name
* whether the field is sensitive
"""
return (
(k, v.field_name, v.is_sensitive) for k, v in ProvidersManager().connection_form_widgets.items()
)
@property
def add_columns(self) -> list[str]:
"""A list of columns to show in the Add form.
        This dynamically calculates additional fields from providers and adds
        them to the backing list. This calculation is done exactly once (by
        checking we're referencing the class-level variable instead of the
        instance-level one), and only after we enter the request context (to skip
        superfluous checks done by Flask-Appbuilder on startup).
"""
if self._add_columns is type(self)._add_columns and has_request_context():
self._add_columns = [
*self._add_columns,
*(k for k, _, _ in self._iter_extra_field_names_and_sensitivity()),
]
return self._add_columns
@property
def edit_columns(self) -> list[str]:
"""A list of columns to show in the Edit form.
        This dynamically calculates additional fields from providers and adds
        them to the backing list. This calculation is done exactly once (by
        checking we're referencing the class-level variable instead of the
        instance-level one), and only after we enter the request context (to skip
        superfluous checks done by Flask-Appbuilder on startup).
"""
if self._edit_columns is type(self)._edit_columns and has_request_context():
self._edit_columns = [
*self._edit_columns,
*(k for k, _, _ in self._iter_extra_field_names_and_sensitivity()),
]
return self._edit_columns
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action(
"mulduplicate",
"Duplicate",
"Are you sure you want to duplicate the selected connections?",
single=False,
)
@provide_session
@auth.has_access(
[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
]
)
def action_mulduplicate(self, connections, session: Session = NEW_SESSION):
"""Duplicate Multiple connections."""
for selected_conn in connections:
new_conn_id = selected_conn.conn_id
match = re2.search(r"_copy(\d+)$", selected_conn.conn_id)
base_conn_id = selected_conn.conn_id
if match:
base_conn_id = base_conn_id.split("_copy")[0]
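            # Try suffixes "_copy1" through "_copy10"; if they are all taken, skip this
            # connection and warn instead of guessing further.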
potential_connection_ids = [f"{base_conn_id}_copy{i}" for i in range(1, 11)]
query = session.scalars(
select(Connection.conn_id).where(Connection.conn_id.in_(potential_connection_ids))
)
found_conn_id_set = {conn_id for conn_id in query}
possible_conn_id_iter = (
connection_id
for connection_id in potential_connection_ids
if connection_id not in found_conn_id_set
)
try:
new_conn_id = next(possible_conn_id_iter)
except StopIteration:
flash(
f"Connection {new_conn_id} can't be added because it already exists, "
f"Please rename the existing connections",
"warning",
)
else:
dup_conn = Connection(
new_conn_id,
selected_conn.conn_type,
selected_conn.description,
selected_conn.host,
selected_conn.login,
selected_conn.password,
selected_conn.schema,
selected_conn.port,
selected_conn.extra,
)
try:
session.add(dup_conn)
session.commit()
flash(f"Connection {new_conn_id} added successfully.", "success")
except IntegrityError:
flash(
f"Connection {new_conn_id} can't be added. Integrity error, "
f"probably unique constraint.",
"warning",
)
session.rollback()
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
"""Process form data."""
conn_id = form.data["conn_id"]
conn_type = form.data["conn_type"]
# The extra value is the combination of custom fields for this conn_type and the Extra field.
# The extra form field with all extra values (including custom fields) is in the form being processed
# so we start with those values, and override them with anything in the custom fields.
extra = {}
extra_json = form.data.get("extra")
if extra_json:
try:
extra.update(json.loads(extra_json))
except (JSONDecodeError, TypeError):
flash(
Markup(
"<p>The <em>Extra</em> connection field contained an invalid value for Conn ID: "
"<q>{conn_id}</q>.</p>"
"<p>If connection parameters need to be added to <em>Extra</em>, "
"please make sure they are in the form of a single, valid JSON object.</p><br>"
"The following <em>Extra</em> parameters were <b>not</b> added to the connection:<br>"
"{extra_json}"
).format(conn_id=conn_id, extra_json=extra_json),
category="error",
)
del form.extra
del extra_json
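        # Custom provider form fields are keyed as "extra__<conn_type>__<field>"; only the
        # fields belonging to the selected connection type are merged into extra.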
for key, field_name, is_sensitive in self._iter_extra_field_names_and_sensitivity():
if key in form.data and key.startswith("extra__"):
conn_type_from_extra_field = key.split("__")[1]
if conn_type_from_extra_field == conn_type:
value = form.data[key]
# Some extra fields have a default value of False so we need to explicitly check the
# value isn't an empty string.
if value != "":
extra[field_name] = value
if extra.keys():
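            # Sensitive custom fields are rendered as a placeholder; if the placeholder comes
            # back unchanged, keep the previously stored value instead of overwriting it.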
sensitive_unchanged_keys = set()
for key, value in extra.items():
if value == SENSITIVE_FIELD_PLACEHOLDER:
sensitive_unchanged_keys.add(key)
if sensitive_unchanged_keys:
try:
conn = BaseHook.get_connection(conn_id)
except AirflowNotFoundException:
conn = None
for key in sensitive_unchanged_keys:
if conn and conn.extra_dejson.get(key):
extra[key] = conn.extra_dejson.get(key)
else:
del extra[key]
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
"""Prefill the form."""
try:
extra = form.data.get("extra")
if extra is None:
extra_dictionary = {}
else:
extra_dictionary = json.loads(extra)
except JSONDecodeError:
extra_dictionary = {}
if not isinstance(extra_dictionary, dict):
logging.warning("extra field for %s is not a dictionary", form.data.get("conn_id", "<unknown>"))
return
for field_key, field_name, is_sensitive in self._iter_extra_field_names_and_sensitivity():
value = extra_dictionary.get(field_name, "")
if not value:
# check if connection `extra` json is using old prefixed field name style
value = extra_dictionary.get(field_key, "")
if value:
field = getattr(form, field_key)
field.data = value
if is_sensitive and field_name in extra_dictionary:
extra_dictionary[field_name] = SENSITIVE_FIELD_PLACEHOLDER
# form.data is a property that builds the dictionary from fields so we have to modify the fields
form.extra.data = json.dumps(extra_dictionary)
class PluginView(AirflowBaseView):
"""View to show Airflow Plugins."""
default_view = "list"
class_permission_name = permissions.RESOURCE_PLUGIN
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
plugins_attributes_to_dump = [
"hooks",
"executors",
"macros",
"admin_views",
"flask_blueprints",
"menu_links",
"appbuilder_views",
"appbuilder_menu_items",
"global_operator_extra_links",
"operator_extra_links",
"source",
]
@expose("/plugin")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
]
)
def list(self):
"""List loaded plugins."""
plugins_manager.ensure_plugins_loaded()
plugins_manager.integrate_executor_plugins()
plugins_manager.initialize_extra_operators_links_plugins()
plugins_manager.initialize_web_ui_plugins()
plugins = []
for plugin_no, plugin in enumerate(plugins_manager.plugins, 1):
plugin_data = {
"plugin_no": plugin_no,
"plugin_name": plugin.name,
"attrs": {},
}
for attr_name in self.plugins_attributes_to_dump:
attr_value = getattr(plugin, attr_name)
plugin_data["attrs"][attr_name] = attr_value
plugins.append(plugin_data)
title = "Airflow Plugins"
doc_url = get_docs_url("plugins.html")
return self.render_template(
"airflow/plugin.html",
plugins=plugins,
title=title,
doc_url=doc_url,
)
class ProviderView(AirflowBaseView):
"""View to show Airflow Providers."""
default_view = "list"
class_permission_name = permissions.RESOURCE_PROVIDER
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
@expose("/provider")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
]
)
def list(self):
"""List providers."""
providers_manager = ProvidersManager()
providers = []
for pi in providers_manager.providers.values():
provider_info = pi.data
provider_data = {
"package_name": provider_info["package-name"],
"description": self._clean_description(provider_info["description"]),
"version": pi.version,
"documentation_url": get_doc_url_for_provider(provider_info["package-name"], pi.version),
}
providers.append(provider_data)
title = "Providers"
doc_url = get_docs_url("apache-airflow-providers/index.html")
return self.render_template(
"airflow/providers.html",
providers=providers,
title=title,
doc_url=doc_url,
)
def _clean_description(self, description):
def _build_link(match_obj):
text = match_obj.group(1)
url = match_obj.group(2)
return Markup(f'<a href="{url}">{text}</a>')
cd = escape(description)
cd = re2.sub(r"`(.*)[\s+]+<(.*)>`__", _build_link, cd)
cd = re2.sub(r"\n", r"<br>", cd)
return Markup(cd)
class PoolModelView(AirflowModelView):
"""View to show records from Pool table."""
route_base = "/pool"
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool) # type: ignore
class_permission_name = permissions.RESOURCE_POOL
method_permission_name = {
"add": "create",
"list": "read",
"edit": "edit",
"delete": "delete",
"action_muldelete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ["pool", "slots", "running_slots", "queued_slots", "scheduled_slots"]
add_columns = ["pool", "slots", "description"]
edit_columns = ["pool", "slots", "description"]
base_order = ("pool", "asc")
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete."""
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash(f"{models.Pool.DEFAULT_POOL_NAME} cannot be deleted", "error")
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@expose("/delete/<pk>", methods=["GET", "POST"])
@has_access
def delete(self, pk):
"""Single delete."""
if models.Pool.is_default_pool(pk):
flash(f"{models.Pool.DEFAULT_POOL_NAME} cannot be deleted", "error")
self.update_redirect()
return redirect(self.get_redirect())
return super().delete(pk)
def pool_link(self):
"""Pool link rendering."""
pool_id = self.get("pool")
if pool_id is not None:
url = url_for("TaskInstanceModelView.list", _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def frunning_slots(self):
"""Running slots rendering."""
pool_id = self.get("pool")
running_slots = self.get("running_slots")
if pool_id is not None and running_slots is not None:
url = url_for("TaskInstanceModelView.list", _flt_3_pool=pool_id, _flt_3_state="running")
return Markup("<a href='{url}'>{running_slots}</a>").format(url=url, running_slots=running_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(self):
"""Queued slots rendering."""
pool_id = self.get("pool")
queued_slots = self.get("queued_slots")
if pool_id is not None and queued_slots is not None:
url = url_for("TaskInstanceModelView.list", _flt_3_pool=pool_id, _flt_3_state="queued")
return Markup("<a href='{url}'>{queued_slots}</a>").format(url=url, queued_slots=queued_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fscheduled_slots(self):
"""Scheduled slots rendering."""
pool_id = self.get("pool")
scheduled_slots = self.get("scheduled_slots")
if pool_id is not None and scheduled_slots is not None:
url = url_for("TaskInstanceModelView.list", _flt_3_pool=pool_id, _flt_3_state="scheduled")
return Markup("<a href='{url}'>{scheduled_slots}</a>").format(
url=url, scheduled_slots=scheduled_slots
)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
"pool": pool_link,
"running_slots": frunning_slots,
"queued_slots": fqueued_slots,
"scheduled_slots": fscheduled_slots,
}
validators_columns = {"pool": [validators.DataRequired()], "slots": [validators.NumberRange(min=-1)]}
def _can_create_variable() -> bool:
return get_airflow_app().appbuilder.sm.has_access(
permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE
)
class VariableModelView(AirflowModelView):
"""View to show records from Variable table."""
route_base = "/variable"
list_template = "airflow/variable_list.html"
edit_template = "airflow/variable_edit.html"
show_template = "airflow/variable_show.html"
show_widget = AirflowVariableShowWidget
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable) # type: ignore
class_permission_name = permissions.RESOURCE_VARIABLE
method_permission_name = {
"add": "create",
"list": "read",
"edit": "edit",
"show": "read",
"delete": "delete",
"action_muldelete": "delete",
"action_varexport": "read",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ["key", "val", "description", "is_encrypted"]
add_columns = ["key", "val", "description"]
edit_columns = ["key", "val", "description"]
show_columns = ["key", "val", "description"]
search_columns = ["key", "val"]
base_order = ("key", "asc")
def hidden_field_formatter(self):
"""Formats hidden fields."""
key = self.get("key")
val = self.get("val")
if secrets_masker.should_hide_value_for_key(key):
return Markup("*" * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
"val": hidden_field_formatter,
}
validators_columns = {"key": [validators.DataRequired()]}
def prefill_form(self, form, request_id):
if secrets_masker.should_hide_value_for_key(form.key.data):
form.val.data = "*" * 8
def prefill_show(self, item):
if secrets_masker.should_hide_value_for_key(item.key):
item.val = "*" * 8
def _show(self, pk):
pages = get_page_args()
page_sizes = get_page_size_args()
orders = get_order_args()
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
self.prefill_show(item)
widgets = self._get_show_widget(pk, item)
self.update_redirect()
return self._get_related_views_widgets(
item, orders=orders, pages=pages, page_sizes=page_sizes, widgets=widgets
)
extra_args = {"can_create_variable": _can_create_variable}
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action("varexport", "Export", "", single=False)
def action_varexport(self, items):
"""Export variables."""
var_dict = {}
decoder = json.JSONDecoder()
for var in items:
try:
val = decoder.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
@expose("/varimport", methods=["POST"])
@auth.has_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE)])
@action_logging(event=f"{permissions.RESOURCE_VARIABLE.lower()}.varimport")
def varimport(self):
"""Import variables."""
try:
variable_dict = json.loads(request.files["file"].read())
except Exception:
self.update_redirect()
flash("Missing file or syntax error.", "error")
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
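            # Non-string values are stored JSON-serialized so they round-trip through export/import.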
for k, v in variable_dict.items():
try:
models.Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e:
logging.info("Variable import failed: %s", repr(e))
fail_count += 1
else:
suc_count += 1
flash(f"{suc_count} variable(s) successfully updated.")
if fail_count:
flash(f"{fail_count} variable(s) failed to be updated.", "error")
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
"""View to show records from Job table."""
route_base = "/job"
datamodel = AirflowModelView.CustomSQLAInterface(Job) # type: ignore
class_permission_name = permissions.RESOURCE_JOB
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
"id",
"dag_id",
"state",
"job_type",
"start_date",
"end_date",
"latest_heartbeat",
"executor_class",
"hostname",
"unixname",
]
search_columns = [
"id",
"dag_id",
"state",
"job_type",
"start_date",
"end_date",
"latest_heartbeat",
"executor_class",
"hostname",
"unixname",
]
base_order = ("start_date", "desc")
base_filters = [["dag_id", DagFilter, list]]
formatters_columns = {
"start_date": wwwutils.datetime_f("start_date"),
"end_date": wwwutils.datetime_f("end_date"),
"hostname": wwwutils.nobr_f("hostname"),
"state": wwwutils.state_f,
"latest_heartbeat": wwwutils.datetime_f("latest_heartbeat"),
}
class DagRunModelView(AirflowPrivilegeVerifierModelView):
"""View to show records from DagRun table."""
route_base = "/dagrun"
datamodel = wwwutils.DagRunCustomSQLAInterface(models.DagRun) # type: ignore
class_permission_name = permissions.RESOURCE_DAG_RUN
method_permission_name = {
"list": "read",
"action_clear": "edit",
"action_muldelete": "delete",
"action_set_queued": "edit",
"action_set_running": "edit",
"action_set_failed": "edit",
"action_set_success": "edit",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
"state",
"dag_id",
"execution_date",
"run_id",
"run_type",
"queued_at",
"start_date",
"end_date",
"note",
"external_trigger",
"conf",
"duration",
]
search_columns = [
"state",
"dag_id",
"execution_date",
"run_id",
"run_type",
"start_date",
"end_date",
"note",
"external_trigger",
]
label_columns = {
"execution_date": "Logical Date",
}
edit_columns = [
"state",
"dag_id",
"execution_date",
"start_date",
"end_date",
"run_id",
"conf",
"note",
]
    # duration is not a DB column; it's derived
order_columns = [
"state",
"dag_id",
"execution_date",
"run_id",
"run_type",
"queued_at",
"start_date",
"end_date",
# "note", # todo: maybe figure out how to re-enable this
"external_trigger",
"conf",
]
base_order = ("execution_date", "desc")
base_filters = [["dag_id", DagFilter, list]]
edit_form = DagRunEditForm
def duration_f(self):
"""Duration calculation."""
end_date = self.get("end_date")
start_date = self.get("start_date")
difference = "0s"
if start_date and end_date:
difference = td_format(end_date - start_date)
return difference
formatters_columns = {
"execution_date": wwwutils.datetime_f("execution_date"),
"state": wwwutils.state_f,
"start_date": wwwutils.datetime_f("start_date"),
"end_date": wwwutils.datetime_f("end_date"),
"queued_at": wwwutils.datetime_f("queued_at"),
"dag_id": wwwutils.dag_link,
"run_id": wwwutils.dag_run_link,
"conf": wwwutils.json_f("conf"),
"duration": duration_f,
}
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
@action_has_dag_edit_access
@action_logging
def action_muldelete(self, items: list[DagRun]):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action("set_queued", "Set state to 'queued'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_queued(self, drs: list[DagRun]):
"""Set state to queued."""
return self._set_dag_runs_to_active_state(drs, DagRunState.QUEUED)
@action("set_running", "Set state to 'running'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_running(self, drs: list[DagRun]):
"""Set state to running."""
return self._set_dag_runs_to_active_state(drs, DagRunState.RUNNING)
@provide_session
def _set_dag_runs_to_active_state(
self,
drs: list[DagRun],
state: DagRunState,
session: Session = NEW_SESSION,
):
"""This routine only supports Running and Queued state."""
try:
count = 0
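            # Re-select the rows in this session; the objects passed to the action may be
            # detached from it.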
for dr in session.scalars(select(DagRun).where(DagRun.id.in_(dagrun.id for dagrun in drs))):
count += 1
if state == DagRunState.RUNNING:
dr.start_date = timezone.utcnow()
dr.state = state
session.commit()
flash(f"{count} dag runs were set to {state}.")
except Exception as ex:
flash(str(ex), "error")
flash("Failed to set state", "error")
return redirect(self.get_default_url())
@action(
"set_failed",
"Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False,
)
@action_has_dag_edit_access
@provide_session
@action_logging
def action_set_failed(self, drs: list[DagRun], session: Session = NEW_SESSION):
"""Set state to failed."""
try:
count = 0
altered_tis = []
for dr in session.scalars(select(DagRun).where(DagRun.id.in_(dagrun.id for dagrun in drs))):
count += 1
altered_tis += set_dag_run_state_to_failed(
dag=get_airflow_app().dag_bag.get_dag(dr.dag_id),
run_id=dr.run_id,
commit=True,
session=session,
)
altered_ti_count = len(altered_tis)
flash(f"{count} dag runs and {altered_ti_count} task instances were set to failed")
except Exception:
flash("Failed to set state", "error")
return redirect(self.get_default_url())
@action(
"set_success",
"Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False,
)
@action_has_dag_edit_access
@provide_session
@action_logging
def action_set_success(self, drs: list[DagRun], session: Session = NEW_SESSION):
"""Set state to success."""
try:
count = 0
altered_tis = []
for dr in session.scalars(select(DagRun).where(DagRun.id.in_(dagrun.id for dagrun in drs))):
count += 1
altered_tis += set_dag_run_state_to_success(
dag=get_airflow_app().dag_bag.get_dag(dr.dag_id),
run_id=dr.run_id,
commit=True,
session=session,
)
altered_ti_count = len(altered_tis)
flash(f"{count} dag runs and {altered_ti_count} task instances were set to success")
except Exception:
flash("Failed to set state", "error")
return redirect(self.get_default_url())
@action("clear", "Clear the state", "All task instances would be cleared, are you sure?", single=False)
@action_has_dag_edit_access
@provide_session
@action_logging
def action_clear(self, drs: list[DagRun], session: Session = NEW_SESSION):
"""Clears the state."""
try:
count = 0
cleared_ti_count = 0
dag_to_tis: dict[DAG, list[TaskInstance]] = {}
for dr in session.scalars(select(DagRun).where(DagRun.id.in_(dagrun.id for dagrun in drs))):
count += 1
dag = get_airflow_app().dag_bag.get_dag(dr.dag_id)
tis_to_clear = dag_to_tis.setdefault(dag, [])
tis_to_clear += dr.get_task_instances()
for dag, tis in dag_to_tis.items():
cleared_ti_count += len(tis)
models.clear_task_instances(tis, session, dag=dag)
flash(f"{count} dag runs and {cleared_ti_count} task instances were cleared")
except Exception:
flash("Failed to clear state", "error")
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
"""View to show records from Log table."""
route_base = "/log"
datamodel = AirflowModelView.CustomSQLAInterface(Log) # type:ignore
class_permission_name = permissions.RESOURCE_AUDIT_LOG
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ["id", "dttm", "dag_id", "task_id", "event", "execution_date", "owner", "extra"]
search_columns = ["dttm", "dag_id", "task_id", "event", "execution_date", "owner", "extra"]
label_columns = {
"execution_date": "Logical Date",
}
base_order = ("dttm", "desc")
base_filters = [["dag_id", DagFilter, list]]
formatters_columns = {
"dttm": wwwutils.datetime_f("dttm"),
"execution_date": wwwutils.datetime_f("execution_date"),
"dag_id": wwwutils.dag_link,
}
class TaskRescheduleModelView(AirflowModelView):
"""View to show records from Task Reschedule table."""
route_base = "/taskreschedule"
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskReschedule) # type: ignore
related_views = [DagRunModelView]
class_permission_name = permissions.RESOURCE_TASK_RESCHEDULE
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
"id",
"dag_id",
"run_id",
"dag_run.execution_date",
"task_id",
"map_index",
"try_number",
"start_date",
"end_date",
"duration",
"reschedule_date",
]
label_columns = {
"dag_run.execution_date": "Logical Date",
}
search_columns = [
"dag_id",
"task_id",
"run_id",
"execution_date",
"start_date",
"end_date",
"reschedule_date",
]
base_order = ("id", "desc")
base_filters = [["dag_id", DagFilter, list]]
def duration_f(self):
"""Duration calculation."""
end_date = self.get("end_date")
duration = self.get("duration")
if end_date and duration:
return td_format(datetime.timedelta(seconds=duration))
return None
formatters_columns = {
"dag_id": wwwutils.dag_link,
"task_id": wwwutils.task_instance_link,
"start_date": wwwutils.datetime_f("start_date"),
"end_date": wwwutils.datetime_f("end_date"),
"dag_run.execution_date": wwwutils.datetime_f("dag_run.execution_date"),
"reschedule_date": wwwutils.datetime_f("reschedule_date"),
"duration": duration_f,
"map_index": wwwutils.format_map_index,
}
class TriggerModelView(AirflowModelView):
"""View to show records from Task Reschedule table."""
route_base = "/triggerview"
datamodel = AirflowModelView.CustomSQLAInterface(models.Trigger) # type: ignore
class_permission_name = permissions.RESOURCE_TRIGGER
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
"id",
"classpath",
"created_date",
"triggerer_id",
]
search_columns = [
"id",
"classpath",
"created_date",
"triggerer_id",
]
base_order = ("id", "created_date")
formatters_columns = {
"created_date": wwwutils.datetime_f("created_date"),
}
class TaskInstanceModelView(AirflowPrivilegeVerifierModelView):
"""View to show records from TaskInstance table."""
route_base = "/taskinstance"
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance) # type: ignore
class_permission_name = permissions.RESOURCE_TASK_INSTANCE
method_permission_name = {
"list": "read",
"action_clear": "edit",
"action_muldelete": "delete",
"action_set_running": "edit",
"action_set_failed": "edit",
"action_set_success": "edit",
"action_set_retry": "edit",
"action_set_skipped": "edit",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
page_size = PAGE_SIZE
list_columns = [
"state",
"dag_id",
"task_id",
"run_id",
"map_index",
"dag_run.execution_date",
"operator",
"start_date",
"end_date",
"duration",
"note",
"job_id",
"hostname",
"unixname",
"priority_weight",
"queue",
"queued_dttm",
"try_number",
"pool",
"queued_by_job_id",
"external_executor_id",
"log_url",
]
order_columns = [
"state",
"dag_id",
"task_id",
"run_id",
"map_index",
"dag_run.execution_date",
"operator",
"start_date",
"end_date",
"duration",
# "note", # TODO: Maybe figure out how to re-enable this.
"job_id",
"hostname",
"unixname",
"priority_weight",
"queue",
"queued_dttm",
"pool",
"queued_by_job_id",
]
label_columns = {
"dag_run.execution_date": "Logical Date",
}
search_columns = [
"state",
"dag_id",
"task_id",
"run_id",
"map_index",
"execution_date",
"operator",
"start_date",
"end_date",
"note",
"hostname",
"priority_weight",
"queue",
"queued_dttm",
"try_number",
"pool",
"queued_by_job_id",
]
edit_columns = [
"dag_id",
"task_id",
"execution_date",
"start_date",
"end_date",
"state",
"note",
]
add_exclude_columns = ["next_method", "next_kwargs", "trigger_id"]
edit_form = TaskInstanceEditForm
base_order = ("job_id", "asc")
base_filters = [["dag_id", DagFilter, list]]
def log_url_formatter(self):
"""Formats log URL."""
log_url = self.get("log_url")
return Markup(
'<a href="{log_url}"><span class="material-icons" aria-hidden="true">reorder</span></a>'
).format(log_url=log_url)
def duration_f(self):
"""Formats duration."""
end_date = self.get("end_date")
duration = self.get("duration")
if end_date and duration:
return td_format(datetime.timedelta(seconds=duration))
return None
formatters_columns = {
"log_url": log_url_formatter,
"task_id": wwwutils.task_instance_link,
"run_id": wwwutils.dag_run_link,
"map_index": wwwutils.format_map_index,
"hostname": wwwutils.nobr_f("hostname"),
"state": wwwutils.state_f,
"dag_run.execution_date": wwwutils.datetime_f("dag_run.execution_date"),
"start_date": wwwutils.datetime_f("start_date"),
"end_date": wwwutils.datetime_f("end_date"),
"queued_dttm": wwwutils.datetime_f("queued_dttm"),
"dag_id": wwwutils.dag_link,
"duration": duration_f,
}
@action(
"clear",
lazy_gettext("Clear"),
lazy_gettext(
"Are you sure you want to clear the state of the selected task"
" instance(s) and set their dagruns to the QUEUED state?"
),
single=False,
)
@action_has_dag_edit_access
@provide_session
@action_logging
def action_clear(self, task_instances, session: Session = NEW_SESSION):
"""Clears the action."""
try:
dag_to_tis = collections.defaultdict(list)
for ti in task_instances:
dag = get_airflow_app().dag_bag.get_dag(ti.dag_id)
dag_to_tis[dag].append(ti)
for dag, task_instances_list in dag_to_tis.items():
models.clear_task_instances(task_instances_list, session, dag=dag)
session.commit()
flash(f"{len(task_instances)} task instances have been cleared")
except Exception as e:
flash(f'Failed to clear task instances: "{e}"', "error")
self.update_redirect()
return redirect(self.get_redirect())
@action("muldelete", "Delete", "Are you sure you want to delete selected records?", single=False)
@action_has_dag_edit_access
@action_logging
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@provide_session
def set_task_instance_state(
self,
tis: Collection[TaskInstance],
target_state: TaskInstanceState,
session: Session = NEW_SESSION,
) -> None:
"""Set task instance state."""
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash(f"{count} task instances were set to '{target_state}'")
except Exception:
flash("Failed to set state", "error")
@action("set_running", "Set state to 'running'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_running(self, tis):
"""Set state to 'running'."""
self.set_task_instance_state(tis, TaskInstanceState.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action("set_failed", "Set state to 'failed'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_failed(self, tis):
"""Set state to 'failed'."""
self.set_task_instance_state(tis, TaskInstanceState.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action("set_success", "Set state to 'success'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_success(self, tis):
"""Set state to 'success'."""
self.set_task_instance_state(tis, TaskInstanceState.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action("set_retry", "Set state to 'up_for_retry'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_retry(self, tis):
"""Set state to 'up_for_retry'."""
self.set_task_instance_state(tis, TaskInstanceState.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
@action("set_skipped", "Set state to 'skipped'", "", single=False)
@action_has_dag_edit_access
@action_logging
def action_set_skipped(self, tis):
"""Set state to skipped."""
self.set_task_instance_state(tis, TaskInstanceState.SKIPPED)
self.update_redirect()
return redirect(self.get_redirect())
class AutocompleteView(AirflowBaseView):
"""View to provide autocomplete results."""
@auth.has_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@provide_session
@expose("/dagmodel/autocomplete")
def autocomplete(self, session: Session = NEW_SESSION):
"""Autocomplete."""
query = unquote(request.args.get("query", ""))
if not query:
return flask.json.jsonify([])
# Provide suggestions of dag_ids and owners
dag_ids_query = select(
sqla.literal("dag").label("type"),
DagModel.dag_id.label("name"),
).where(~DagModel.is_subdag, DagModel.is_active, DagModel.dag_id.ilike(f"%{query}%"))
owners_query = (
select(
sqla.literal("owner").label("type"),
DagModel.owners.label("name"),
)
.distinct()
.where(~DagModel.is_subdag, DagModel.is_active, DagModel.owners.ilike(f"%{query}%"))
)
# Hide DAGs if not showing status: "all"
status = flask_session.get(FILTER_STATUS_COOKIE)
if status == "active":
dag_ids_query = dag_ids_query.where(~DagModel.is_paused)
owners_query = owners_query.where(~DagModel.is_paused)
elif status == "paused":
dag_ids_query = dag_ids_query.where(DagModel.is_paused)
owners_query = owners_query.where(DagModel.is_paused)
filter_dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
dag_ids_query = dag_ids_query.where(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.where(DagModel.dag_id.in_(filter_dag_ids))
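        # Union both suggestion queries and cap the combined, name-sorted result at 10 entries.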
payload = [
row._asdict()
for row in session.execute(dag_ids_query.union(owners_query).order_by("name").limit(10))
]
return flask.json.jsonify(payload)
class DagDependenciesView(AirflowBaseView):
"""View to show dependencies between DAGs."""
refresh_interval = datetime.timedelta(
seconds=conf.getint(
"webserver",
"dag_dependencies_refresh_interval",
fallback=conf.getint("scheduler", "dag_dir_list_interval"),
)
)
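    # The dependency graph is cached on the view class; it is recomputed once the refresh
    # interval has elapsed and the serialized DAGs have changed since the last refresh.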
last_refresh = timezone.utcnow() - refresh_interval
nodes: list[dict[str, Any]] = []
edges: list[dict[str, str]] = []
@expose("/dag-dependencies")
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
]
)
@gzipped
@action_logging
def list(self):
"""Display DAG dependencies."""
title = "DAG Dependencies"
if not self.nodes or not self.edges:
self._calculate_graph()
self.last_refresh = timezone.utcnow()
elif timezone.utcnow() > self.last_refresh + self.refresh_interval:
max_last_updated = SerializedDagModel.get_max_last_updated_datetime()
if max_last_updated is None or max_last_updated > self.last_refresh:
self._calculate_graph()
self.last_refresh = timezone.utcnow()
return self.render_template(
"airflow/dag_dependencies.html",
title=title,
nodes=self.nodes,
edges=self.edges,
last_refresh=self.last_refresh,
arrange=conf.get("webserver", "dag_orientation"),
width=request.args.get("width", "100%"),
height=request.args.get("height", "800"),
)
def _calculate_graph(self):
nodes_dict: dict[str, Any] = {}
        edge_tuples: set[tuple[str, str]] = set()
for dag, dependencies in SerializedDagModel.get_dag_dependencies().items():
dag_node_id = f"dag:{dag}"
if dag_node_id not in nodes_dict:
nodes_dict[dag_node_id] = node_dict(dag_node_id, dag, "dag")
for dep in dependencies:
if dep.node_id not in nodes_dict:
nodes_dict[dep.node_id] = node_dict(dep.node_id, dep.dependency_id, dep.dependency_type)
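                # Every dependency contributes two edges: source DAG -> dependency node -> target DAG.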
edge_tuples.add((f"dag:{dep.source}", dep.node_id))
edge_tuples.add((dep.node_id, f"dag:{dep.target}"))
self.nodes = list(nodes_dict.values())
self.edges = [{"u": u, "v": v} for u, v in edge_tuples]
def add_user_permissions_to_dag(sender, template, context, **extra):
"""
Adds `.can_edit`, `.can_trigger`, and `.can_delete` properties
    to the DAG based on the current user's permissions.
Located in `views.py` rather than the DAG model to keep
permissions logic out of the Airflow core.
"""
if "dag" not in context:
return
dag = context["dag"]
can_create_dag_run = get_airflow_app().appbuilder.sm.has_access(
permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN
)
dag.can_edit = get_airflow_app().appbuilder.sm.can_edit_dag(dag.dag_id)
dag.can_trigger = dag.can_edit and can_create_dag_run
dag.can_delete = get_airflow_app().appbuilder.sm.can_delete_dag(dag.dag_id)
context["dag"] = dag
# NOTE: Put this at the end of the file. Pylance is too clever and detects that
# before_render_template.connect() is declared as NoReturn, and marks everything
# after this line as unreachable code. It's technically correct based on the
# lint-time information, but that's not what actually happens at runtime.
before_render_template.connect(add_user_permissions_to_dag)
| 230,529 | 36.897419 | 110 | py |
airflow | airflow-main/airflow/www/widgets.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask_appbuilder.fieldwidgets import BS3TextAreaFieldWidget, BS3TextFieldWidget
from flask_appbuilder.widgets import RenderTemplateWidget
from markupsafe import Markup
from wtforms.widgets import html_params
class AirflowModelListWidget(RenderTemplateWidget):
"""Airflow model list."""
template = "airflow/model_list.html"
class AirflowDateTimePickerWidget:
"""Airflow date time picker widget."""
data_template = (
'<div class="input-group datetime datetimepicker">'
'<span class="input-group-addon"><span class="material-icons cursor-hand">calendar_today</span>'
"</span>"
'<input class="form-control" %(text)s />'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
if not field.data:
field.data = ""
template = self.data_template
return Markup(
template % {"text": html_params(type="text", value=field.data, required=True, **kwargs)}
)
class AirflowDateTimePickerROWidget(AirflowDateTimePickerWidget):
"""Airflow Read-only date time picker widget."""
def __call__(self, field, **kwargs):
kwargs["readonly"] = "true"
return super().__call__(field, **kwargs)
class BS3TextFieldROWidget(BS3TextFieldWidget):
"""Read-only single-line text input Widget (BS3TextFieldWidget)."""
def __call__(self, field, **kwargs):
kwargs["readonly"] = "true"
return super().__call__(field, **kwargs)
class BS3TextAreaROWidget(BS3TextAreaFieldWidget):
"""Read-only multi-line text area Widget (BS3TextAreaROWidget)."""
def __call__(self, field, **kwargs):
kwargs["readonly"] = "true"
return super().__call__(field, **kwargs)
class AirflowVariableShowWidget(RenderTemplateWidget):
"""Airflow variable show widget."""
template = "airflow/variable_show_widget.html"
| 2,776 | 32.457831 | 104 | py |
airflow | airflow-main/airflow/www/validators.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from json import JSONDecodeError
from wtforms.validators import EqualTo, ValidationError
from airflow.utils import helpers
class GreaterEqualThan(EqualTo):
"""Compares the values of two fields.
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error. Can be
interpolated with `%(other_label)s` and `%(other_name)s` to provide a
more helpful error.
"""
def __call__(self, form, field):
try:
other = form[self.fieldname]
except KeyError:
raise ValidationError(field.gettext(f"Invalid field name '{self.fieldname}'."))
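        # Skip the comparison when either value is missing.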
if field.data is None or other.data is None:
return
if field.data < other.data:
message_args = {
"other_label": hasattr(other, "label") and other.label.text or self.fieldname,
"other_name": self.fieldname,
}
message = self.message
if message is None:
message = field.gettext(
f"Field must be greater than or equal to {message_args['other_label']}."
)
else:
message = message % message_args
raise ValidationError(message)
class ValidJson:
"""Validates data is valid JSON.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if field.data:
try:
json.loads(field.data)
except JSONDecodeError as ex:
message = self.message or f"JSON Validation Error: {ex}"
raise ValidationError(message=field.gettext(message.format(field.data)))
class ValidKey:
"""
Validates values that will be used as keys.
:param max_length:
The maximum length of the given key
"""
def __init__(self, max_length=200):
self.max_length = max_length
def __call__(self, form, field):
if field.data:
try:
helpers.validate_key(field.data, self.max_length)
except Exception as e:
raise ValidationError(str(e))
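# --- Illustrative sketch, not part of the original module ---
# Standalone use of the validators above; the form/field objects are faked
# with SimpleNamespace purely for illustration, which is enough because the
# validators only read ``field.data`` (and ``field.gettext`` on failure).
from types import SimpleNamespace
def _example_validator_usage():
    json_field = SimpleNamespace(data='{"pool": "default_pool"}', gettext=lambda s: s)
    ValidJson()(form=None, field=json_field)  # passes silently: the payload is valid JSON
    key_field = SimpleNamespace(data="my_key", gettext=lambda s: s)
    ValidKey(max_length=50)(form=None, field=key_field)  # passes silently: a well-formed key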
| 3,138 | 30.39 | 94 | py |
airflow | airflow-main/airflow/www/gunicorn_config.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import setproctitle
from airflow import settings
def post_worker_init(_):
"""
Set process title.
This is used by airflow.cli.commands.webserver_command to track the status of the worker.
"""
old_title = setproctitle.getproctitle()
setproctitle.setproctitle(settings.GUNICORN_WORKER_READY_PREFIX + old_title)
def on_starting(server):
from airflow.providers_manager import ProvidersManager
# Load providers before forking workers
ProvidersManager().connection_form_widgets
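# --- Illustrative sketch, not part of the original module ---
# What post_worker_init does to the process title, demonstrated in isolation;
# running this outside a real gunicorn worker is purely illustrative.
if __name__ == "__main__":
    _title_before = setproctitle.getproctitle()
    post_worker_init(None)
    assert setproctitle.getproctitle() == settings.GUNICORN_WORKER_READY_PREFIX + _title_before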
| 1,359 | 32.170732 | 93 | py |
airflow | airflow-main/airflow/www/app.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from datetime import timedelta
from flask import Flask
from flask_appbuilder import SQLA
from flask_wtf.csrf import CSRFProtect
from markupsafe import Markup
from sqlalchemy.engine.url import make_url
from airflow import settings
from airflow.api_internal.internal_api_call import InternalApiConfig
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException, RemovedInAirflow3Warning
from airflow.logging_config import configure_logging
from airflow.models import import_all_models
from airflow.settings import _ENABLE_AIP_44
from airflow.utils.json import AirflowJsonProvider
from airflow.www.extensions.init_appbuilder import init_appbuilder
from airflow.www.extensions.init_appbuilder_links import init_appbuilder_links
from airflow.www.extensions.init_cache import init_cache
from airflow.www.extensions.init_dagbag import init_dagbag
from airflow.www.extensions.init_jinja_globals import init_jinja_globals
from airflow.www.extensions.init_manifest_files import configure_manifest_files
from airflow.www.extensions.init_robots import init_robots
from airflow.www.extensions.init_security import (
init_api_experimental_auth,
init_check_user_active,
init_xframe_protection,
)
from airflow.www.extensions.init_session import init_airflow_session_interface
from airflow.www.extensions.init_views import (
init_api_connexion,
init_api_experimental,
init_api_internal,
init_appbuilder_views,
init_error_handlers,
init_flash_views,
init_plugins,
)
from airflow.www.extensions.init_wsgi_middlewares import init_wsgi_middleware
app: Flask | None = None
# Initializes at the module level, so plugins can access it.
# See: /docs/plugins.rst
csrf = CSRFProtect()
def sync_appbuilder_roles(flask_app):
"""Sync appbuilder roles to DB."""
# Garbage collect old permissions/views after they have been modified.
# Otherwise, when the name of a view or menu is changed, the framework
# will add the new Views and Menus names to the backend, but will not
# delete the old ones.
if conf.getboolean("webserver", "UPDATE_FAB_PERMS"):
flask_app.appbuilder.sm.sync_roles()
def create_app(config=None, testing=False):
"""Create a new instance of Airflow WWW app."""
flask_app = Flask(__name__)
flask_app.secret_key = conf.get("webserver", "SECRET_KEY")
flask_app.config["PERMANENT_SESSION_LIFETIME"] = timedelta(minutes=settings.get_session_lifetime_config())
flask_app.config.from_pyfile(settings.WEBSERVER_CONFIG, silent=True)
flask_app.config["TESTING"] = testing
flask_app.config["SQLALCHEMY_DATABASE_URI"] = conf.get("database", "SQL_ALCHEMY_CONN")
instance_name = conf.get(section="webserver", key="instance_name", fallback="Airflow")
instance_name_has_markup = conf.getboolean(
section="webserver", key="instance_name_has_markup", fallback=False
)
if instance_name_has_markup:
instance_name = Markup(instance_name).striptags()
flask_app.config["APP_NAME"] = instance_name
url = make_url(flask_app.config["SQLALCHEMY_DATABASE_URI"])
if url.drivername == "sqlite" and url.database and not url.database.startswith("/"):
raise AirflowConfigException(
f'Cannot use relative path: `{conf.get("database", "SQL_ALCHEMY_CONN")}` to connect to sqlite. '
"Please use absolute path such as `sqlite:////tmp/airflow.db`."
)
flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
flask_app.config["SESSION_COOKIE_HTTPONLY"] = True
flask_app.config["SESSION_COOKIE_SECURE"] = conf.getboolean("webserver", "COOKIE_SECURE")
cookie_samesite_config = conf.get("webserver", "COOKIE_SAMESITE")
if cookie_samesite_config == "":
warnings.warn(
"Old deprecated value found for `cookie_samesite` option in `[webserver]` section. "
"Using `Lax` instead. Change the value to `Lax` in airflow.cfg to remove this warning.",
RemovedInAirflow3Warning,
)
cookie_samesite_config = "Lax"
flask_app.config["SESSION_COOKIE_SAMESITE"] = cookie_samesite_config
if config:
flask_app.config.from_mapping(config)
if "SQLALCHEMY_ENGINE_OPTIONS" not in flask_app.config:
flask_app.config["SQLALCHEMY_ENGINE_OPTIONS"] = settings.prepare_engine_args()
# Configure the JSON encoder used by `|tojson` filter from Flask
flask_app.json_provider_class = AirflowJsonProvider
flask_app.json = AirflowJsonProvider(flask_app)
InternalApiConfig.force_database_direct_access()
csrf.init_app(flask_app)
init_wsgi_middleware(flask_app)
db = SQLA()
db.session = settings.Session
db.init_app(flask_app)
init_dagbag(flask_app)
init_api_experimental_auth(flask_app)
init_robots(flask_app)
init_cache(flask_app)
init_flash_views(flask_app)
configure_logging()
configure_manifest_files(flask_app)
import_all_models()
with flask_app.app_context():
init_appbuilder(flask_app)
init_appbuilder_views(flask_app)
init_appbuilder_links(flask_app)
init_plugins(flask_app)
init_error_handlers(flask_app)
init_api_connexion(flask_app)
if conf.getboolean("webserver", "run_internal_api", fallback=False):
if not _ENABLE_AIP_44:
raise RuntimeError("The AIP_44 is not enabled so you cannot use it.")
init_api_internal(flask_app)
init_api_experimental(flask_app)
sync_appbuilder_roles(flask_app)
init_jinja_globals(flask_app)
init_xframe_protection(flask_app)
init_airflow_session_interface(flask_app)
init_check_user_active(flask_app)
return flask_app
def cached_app(config=None, testing=False):
"""Return cached instance of Airflow WWW app."""
global app
if not app:
app = create_app(config=config, testing=testing)
return app
def purge_cached_app():
"""Removes the cached version of the app in global state."""
global app
app = None
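# --- Illustrative sketch, not part of the original module ---
# A hypothetical helper showing how a test might obtain a throwaway app and
# client; ``testing=True`` and the probed URL are assumptions for illustration.
def _example_health_probe():
    flask_app = create_app(testing=True)
    with flask_app.test_client() as client:
        return client.get("/health")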
| 6,907 | 35.550265 | 110 | py |
airflow | airflow-main/airflow/www/utils.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import textwrap
import time
from typing import TYPE_CHECKING, Any, Callable, Sequence
from urllib.parse import urlencode
from flask import request, url_for
from flask.helpers import flash
from flask_appbuilder.forms import FieldConverter
from flask_appbuilder.models.filters import BaseFilter
from flask_appbuilder.models.sqla import Model, filters as fab_sqlafilters
from flask_appbuilder.models.sqla.filters import get_field_setup_query, set_value_to_type
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext
from markdown_it import MarkdownIt
from markupsafe import Markup
from pendulum.datetime import DateTime
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from pygments.lexer import Lexer
from sqlalchemy import delete, func, select, types
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.sql import Select
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.models import errors
from airflow.models.dagrun import DagRun
from airflow.models.dagwarning import DagWarning
from airflow.models.taskinstance import TaskInstance
from airflow.utils import timezone
from airflow.utils.code_utils import get_python_source
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import WebEncoder
from airflow.utils.state import State, TaskInstanceState
from airflow.www.forms import DateTimeWithTimezoneField
from airflow.www.widgets import AirflowDateTimePickerWidget
if TYPE_CHECKING:
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.operators import ColumnOperators
from airflow.www.fab_security.sqla.manager import SecurityManager
def datetime_to_string(value: DateTime | None) -> str | None:
if value is None:
return None
return value.isoformat()
def get_mapped_instances(task_instance, session):
return session.scalars(
select(TaskInstance)
.where(
TaskInstance.dag_id == task_instance.dag_id,
TaskInstance.run_id == task_instance.run_id,
TaskInstance.task_id == task_instance.task_id,
)
.order_by(TaskInstance.map_index)
).all()
def get_instance_with_map(task_instance, session):
if task_instance.map_index == -1:
return alchemy_to_dict(task_instance)
mapped_instances = get_mapped_instances(task_instance, session)
return get_mapped_summary(task_instance, mapped_instances)
def get_try_count(try_number: int, state: State):
if state in (TaskInstanceState.DEFERRED, TaskInstanceState.UP_FOR_RESCHEDULE):
return try_number + 1
return try_number
priority: list[None | TaskInstanceState] = [
TaskInstanceState.FAILED,
TaskInstanceState.UPSTREAM_FAILED,
TaskInstanceState.UP_FOR_RETRY,
TaskInstanceState.UP_FOR_RESCHEDULE,
TaskInstanceState.QUEUED,
TaskInstanceState.SCHEDULED,
TaskInstanceState.DEFERRED,
TaskInstanceState.RUNNING,
TaskInstanceState.SHUTDOWN,
TaskInstanceState.RESTARTING,
None,
TaskInstanceState.SUCCESS,
TaskInstanceState.SKIPPED,
TaskInstanceState.REMOVED,
]
def get_mapped_summary(parent_instance, task_instances):
mapped_states = [ti.state for ti in task_instances]
group_state = None
for state in priority:
if state in mapped_states:
group_state = state
break
group_start_date = datetime_to_string(
min((ti.start_date for ti in task_instances if ti.start_date), default=None)
)
group_end_date = datetime_to_string(
max((ti.end_date for ti in task_instances if ti.end_date), default=None)
)
return {
"task_id": parent_instance.task_id,
"run_id": parent_instance.run_id,
"state": group_state,
"start_date": group_start_date,
"end_date": group_end_date,
"mapped_states": mapped_states,
"try_number": get_try_count(parent_instance._try_number, parent_instance.state),
}
def get_dag_run_conf(
dag_run_conf: Any, *, json_encoder: type[json.JSONEncoder] = json.JSONEncoder
) -> tuple[str | None, bool]:
conf: str | None = None
conf_is_json: bool = False
if isinstance(dag_run_conf, str):
conf = dag_run_conf
elif isinstance(dag_run_conf, (dict, list)) and any(dag_run_conf):
conf = json.dumps(dag_run_conf, sort_keys=True, cls=json_encoder, ensure_ascii=False)
conf_is_json = True
return conf, conf_is_json
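# --- Illustrative sketch, not part of the original module ---
# A dict conf is serialized to sorted JSON, an empty one is dropped, and a
# plain string passes through untouched.
def _example_get_dag_run_conf():
    assert get_dag_run_conf({"b": 2, "a": 1}) == ('{"a": 1, "b": 2}', True)
    assert get_dag_run_conf({}) == (None, False)
    assert get_dag_run_conf("already a string") == ("already a string", False)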
def encode_dag_run(
dag_run: DagRun | None, *, json_encoder: type[json.JSONEncoder] = json.JSONEncoder
) -> dict[str, Any] | None:
if not dag_run:
return None
conf, conf_is_json = get_dag_run_conf(dag_run.conf, json_encoder=json_encoder)
return {
"run_id": dag_run.run_id,
"queued_at": datetime_to_string(dag_run.queued_at),
"start_date": datetime_to_string(dag_run.start_date),
"end_date": datetime_to_string(dag_run.end_date),
"state": dag_run.state,
"execution_date": datetime_to_string(dag_run.execution_date),
"data_interval_start": datetime_to_string(dag_run.data_interval_start),
"data_interval_end": datetime_to_string(dag_run.data_interval_end),
"run_type": dag_run.run_type,
"last_scheduling_decision": datetime_to_string(dag_run.last_scheduling_decision),
"external_trigger": dag_run.external_trigger,
"conf": conf,
"conf_is_json": conf_is_json,
"note": dag_run.note,
}
def check_import_errors(fileloc, session):
# Check dag import errors
import_errors = session.scalars(
select(errors.ImportError).where(errors.ImportError.filename == fileloc)
).all()
if import_errors:
for import_error in import_errors:
flash("Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=import_error), "dag_import_error")
def check_dag_warnings(dag_id, session):
dag_warnings = session.scalars(select(DagWarning).where(DagWarning.dag_id == dag_id)).all()
if dag_warnings:
for dag_warning in dag_warnings:
flash(dag_warning.message, "warning")
def get_sensitive_variables_fields():
import warnings
from airflow.utils.log.secrets_masker import get_sensitive_variables_fields
warnings.warn(
"This function is deprecated. Please use "
"`airflow.utils.log.secrets_masker.get_sensitive_variables_fields`",
RemovedInAirflow3Warning,
stacklevel=2,
)
return get_sensitive_variables_fields()
def should_hide_value_for_key(key_name):
import warnings
from airflow.utils.log.secrets_masker import should_hide_value_for_key
warnings.warn(
"This function is deprecated. Please use "
"`airflow.utils.log.secrets_masker.should_hide_value_for_key`",
RemovedInAirflow3Warning,
stacklevel=2,
)
return should_hide_value_for_key(key_name)
def get_params(**kwargs):
"""Return URL-encoded params."""
return urlencode({d: v for d, v in kwargs.items() if v is not None}, True)
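# --- Illustrative sketch, not part of the original module ---
# None values are dropped and list values expand into repeated keys because
# urlencode is called with doseq=True.
def _example_get_params():
    assert get_params(page=0, search=None, tags=["prod", "etl"]) == "page=0&tags=prod&tags=etl"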
def generate_pages(
current_page,
num_of_pages,
search=None,
status=None,
tags=None,
window=7,
sorting_key=None,
sorting_direction=None,
):
"""
    Generate the HTML for a paging component, using logic similar to the paging
    auto-generated by Flask managed views. The component shows a fixed number of
    pages in the pager (the window); once the user goes past the largest visible
    page, the page numbers scroll to the right so the current page stays in the
    middle of the pager. On the last pages the numbers stop scrolling and the
    selection simply moves on to the last page. The pager also contains the
    <first, previous, ..., next, last> pages.
    This component takes custom parameters such as search, status, and tags into
    account; they are added to the page links to maintain state between client
    and server, and they also allow a specific paging state to be bookmarked.
:param current_page: the current page number, 0-indexed
:param num_of_pages: the total number of pages
:param search: the search query string, if any
:param status: 'all', 'active', or 'paused'
:param tags: array of strings of the current filtered tags
:param window: the number of pages to be shown in the paging component (7 default)
:param sorting_key: the sorting key selected for dags, None indicates that sorting is not needed/provided
:param sorting_direction: direction of sorting, 'asc' or 'desc',
None indicates that sorting is not needed/provided
:return: the HTML string of the paging component
"""
void_link = "javascript:void(0)"
first_node = Markup(
"""<li class="paginate_button {disabled}" id="dags_first">
<a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">«</a>
</li>"""
)
previous_node = Markup(
"""<li class="paginate_button previous {disabled}" id="dags_previous">
<a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">‹</a>
</li>"""
)
next_node = Markup(
"""<li class="paginate_button next {disabled}" id="dags_next">
<a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">›</a>
</li>"""
)
last_node = Markup(
"""<li class="paginate_button {disabled}" id="dags_last">
<a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">»</a>
</li>"""
)
page_node = Markup(
"""<li class="paginate_button {is_active}">
<a href="{href_link}" aria-controls="dags" data-dt-idx="2" tabindex="0">{page_num}</a>
</li>"""
)
output = [Markup('<ul class="pagination" style="margin-top:0;">')]
is_disabled = "disabled" if current_page <= 0 else ""
qs = get_params(
page=0,
search=search,
status=status,
tags=tags,
sorting_key=sorting_key,
sorting_direction=sorting_direction,
)
first_node_link = void_link if is_disabled else f"?{qs}"
output.append(
first_node.format(
href_link=first_node_link,
disabled=is_disabled,
)
)
page_link = void_link
if current_page > 0:
qs = get_params(
page=current_page - 1,
search=search,
status=status,
tags=tags,
sorting_key=sorting_key,
sorting_direction=sorting_direction,
)
page_link = f"?{qs}"
output.append(previous_node.format(href_link=page_link, disabled=is_disabled))
mid = int(window / 2)
last_page = num_of_pages - 1
if current_page <= mid or num_of_pages < window:
pages = list(range(0, min(num_of_pages, window)))
elif mid < current_page < last_page - mid:
pages = list(range(current_page - mid, current_page + mid + 1))
else:
pages = list(range(num_of_pages - window, last_page + 1))
def is_current(current, page):
return page == current
for page in pages:
qs = get_params(
page=page,
search=search,
status=status,
tags=tags,
sorting_key=sorting_key,
sorting_direction=sorting_direction,
)
vals = {
"is_active": "active" if is_current(current_page, page) else "",
"href_link": void_link if is_current(current_page, page) else f"?{qs}",
"page_num": page + 1,
}
output.append(page_node.format(**vals))
is_disabled = "disabled" if current_page >= num_of_pages - 1 else ""
qs = get_params(
page=current_page + 1,
search=search,
status=status,
tags=tags,
sorting_key=sorting_key,
sorting_direction=sorting_direction,
)
page_link = void_link if current_page >= num_of_pages - 1 else f"?{qs}"
output.append(next_node.format(href_link=page_link, disabled=is_disabled))
qs = get_params(
page=last_page,
search=search,
status=status,
tags=tags,
sorting_key=sorting_key,
sorting_direction=sorting_direction,
)
last_node_link = void_link if is_disabled else f"?{qs}"
output.append(
last_node.format(
href_link=last_node_link,
disabled=is_disabled,
)
)
output.append(Markup("</ul>"))
return Markup("\n".join(output))
def epoch(dttm):
"""Returns an epoch-type date (tuple with no timezone)."""
return (int(time.mktime(dttm.timetuple())) * 1000,)
def make_cache_key(*args, **kwargs):
"""Used by cache to get a unique key per URL."""
path = request.path
args = str(hash(frozenset(request.args.items())))
return (path + args).encode("ascii", "ignore")
def task_instance_link(attr):
"""Generates a URL to the Graph view for a TaskInstance."""
dag_id = attr.get("dag_id")
task_id = attr.get("task_id")
execution_date = attr.get("dag_run.execution_date") or attr.get("execution_date") or timezone.utcnow()
url = url_for(
"Airflow.task",
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date.isoformat(),
map_index=attr.get("map_index", -1),
)
url_root = url_for(
"Airflow.graph", dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat()
)
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="material-icons" style="margin-left:0;"
aria-hidden="true">filter_alt</span>
</a>
</span>
"""
).format(url=url, task_id=task_id, url_root=url_root)
def state_token(state):
"""Returns a formatted string with HTML for a given State."""
color = State.color(state)
fg_color = State.color_fg(state)
return Markup(
"""
<span class="label" style="color:{fg_color}; background-color:{color};"
title="Current State: {state}">{state}</span>
"""
).format(color=color, state=state, fg_color=fg_color)
def state_f(attr):
"""Gets 'state' & returns a formatted string with HTML for a given State."""
state = attr.get("state")
return state_token(state)
def nobr_f(attr_name):
"""Returns a formatted string with HTML with a Non-breaking Text element."""
def nobr(attr):
f = attr.get(attr_name)
return Markup("<nobr>{}</nobr>").format(f)
return nobr
def datetime_f(attr_name):
"""Returns a formatted string with HTML for given DataTime."""
def dt(attr):
f = attr.get(attr_name)
return datetime_html(f)
return dt
def datetime_html(dttm: DateTime | None) -> str:
"""Return an HTML formatted string with time element to support timezone changes in UI."""
as_iso = dttm.isoformat() if dttm else ""
if not as_iso:
return Markup("")
as_iso_short = as_iso
if timezone.utcnow().isoformat()[:4] == as_iso[:4]:
as_iso_short = as_iso[5:]
# The empty title will be replaced in JS code when non-UTC dates are displayed
return Markup('<nobr><time title="" datetime="{}">{}</time></nobr>').format(as_iso, as_iso_short)
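# --- Illustrative sketch, not part of the original module ---
# For a timestamp outside the current year the full ISO string is kept in the
# visible label as well as in the ``datetime`` attribute.
def _example_datetime_html():
    rendered = datetime_html(timezone.datetime(1999, 1, 2, 3, 4, 5))
    assert 'datetime="1999-01-02T03:04:05+00:00"' in rendered
    assert ">1999-01-02T03:04:05+00:00<" in rendered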
def json_f(attr_name):
"""Returns a formatted string with HTML for given JSON serializable."""
def json_(attr):
f = attr.get(attr_name)
serialized = json.dumps(f, cls=WebEncoder)
return Markup("<nobr>{}</nobr>").format(serialized)
return json_
def dag_link(attr):
"""Generates a URL to the Graph view for a Dag."""
dag_id = attr.get("dag_id")
execution_date = attr.get("execution_date")
if not dag_id:
return Markup("None")
url = url_for("Airflow.graph", dag_id=dag_id, execution_date=execution_date)
return Markup('<a href="{}">{}</a>').format(url, dag_id)
def dag_run_link(attr):
"""Generates a URL to the Graph view for a DagRun."""
dag_id = attr.get("dag_id")
run_id = attr.get("run_id")
execution_date = attr.get("dag_run.execution_date") or attr.get("execution_date")
url = url_for("Airflow.graph", dag_id=dag_id, run_id=run_id, execution_date=execution_date)
return Markup('<a href="{url}">{run_id}</a>').format(url=url, run_id=run_id)
def _get_run_ordering_expr(name: str) -> ColumnOperators:
expr = DagRun.__table__.columns[name]
# Data interval columns are NULL for runs created before 2.3, but SQL's
# NULL-sorting logic would make those old runs always appear first. In a
# perfect world we'd want to sort by ``get_run_data_interval()``, but that's
# not efficient, so instead the columns are coalesced into execution_date,
# which is good enough in most cases.
if name in ("data_interval_start", "data_interval_end"):
expr = func.coalesce(expr, DagRun.execution_date)
return expr.desc()
def sorted_dag_runs(
query: Select, *, ordering: Sequence[str], limit: int, session: Session
) -> Sequence[DagRun]:
"""Produce DAG runs sorted by specified columns.
:param query: An ORM select object against *DagRun*.
    :param ordering: Column names to sort the runs; these should generally come
        from a timetable's ``run_ordering``.
:param limit: Number of runs to limit to.
:param session: SQLAlchemy ORM session object
:return: A list of DagRun objects ordered by the specified columns. The list
contains only the *last* objects, but in *ascending* order.
"""
ordering_exprs = (_get_run_ordering_expr(name) for name in ordering)
runs = session.scalars(query.order_by(*ordering_exprs, DagRun.id.desc()).limit(limit)).all()
runs.reverse()
return runs
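# --- Illustrative sketch, not part of the original module ---
# A hypothetical call site: fetch the last 25 runs of a DAG ordered the way its
# timetable prefers (``run_ordering``), returned oldest-first for rendering.
def _example_last_runs(dag, session):
    query = select(DagRun).where(DagRun.dag_id == dag.dag_id)
    return sorted_dag_runs(query, ordering=dag.timetable.run_ordering, limit=25, session=session)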
def format_map_index(attr: dict) -> str:
"""Format map index for list columns in model view."""
value = attr["map_index"]
if value < 0:
return Markup(" ")
return str(value)
def pygment_html_render(s, lexer=lexers.TextLexer):
"""Highlight text using a given Lexer."""
return highlight(s, lexer(), HtmlFormatter(linenos=True))
def render(obj: Any, lexer: Lexer, handler: Callable[[Any], str] | None = None):
"""Render a given Python object with a given Pygments lexer."""
if isinstance(obj, str):
return Markup(pygment_html_render(obj, lexer))
elif isinstance(obj, (tuple, list)):
out = ""
for i, text_to_render in enumerate(obj):
if lexer is lexers.PythonLexer:
text_to_render = repr(text_to_render)
out += Markup("<div>List item #{}</div>").format(i)
out += Markup("<div>" + pygment_html_render(text_to_render, lexer) + "</div>")
return out
elif isinstance(obj, dict):
out = ""
for k, v in obj.items():
if lexer is lexers.PythonLexer:
v = repr(v)
out += Markup('<div>Dict item "{}"</div>').format(k)
out += Markup("<div>" + pygment_html_render(v, lexer) + "</div>")
return out
elif handler is not None and obj is not None:
return Markup(pygment_html_render(handler(obj), lexer))
else:
# Return empty string otherwise
return ""
def json_render(obj, lexer):
"""Render a given Python object with json lexer."""
out = ""
if isinstance(obj, str):
out = Markup(pygment_html_render(obj, lexer))
elif isinstance(obj, (dict, list)):
content = json.dumps(obj, sort_keys=True, indent=4)
out = Markup(pygment_html_render(content, lexer))
return out
def wrapped_markdown(s, css_class="rich_doc"):
"""Convert a Markdown string to HTML."""
md = MarkdownIt("gfm-like")
if s is None:
return None
s = textwrap.dedent(s)
return Markup(f'<div class="{css_class}" >{md.render(s)}</div>')
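# --- Illustrative sketch, not part of the original module ---
# Markdown is dedented, rendered with the GFM-like preset, and wrapped in a
# div carrying the requested CSS class.
def _example_wrapped_markdown():
    html = wrapped_markdown("# Pipeline docs\nOwned by the *data* team.", css_class="doc_md")
    assert html.startswith('<div class="doc_md" >')
    assert "<h1>" in html and "<em>data</em>" in html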
def get_attr_renderer():
"""Return Dictionary containing different Pygments Lexers for Rendering & Highlighting."""
return {
"bash": lambda x: render(x, lexers.BashLexer),
"bash_command": lambda x: render(x, lexers.BashLexer),
"doc": lambda x: render(x, lexers.TextLexer),
"doc_json": lambda x: render(x, lexers.JsonLexer),
"doc_md": wrapped_markdown,
"doc_rst": lambda x: render(x, lexers.RstLexer),
"doc_yaml": lambda x: render(x, lexers.YamlLexer),
"hql": lambda x: render(x, lexers.SqlLexer),
"html": lambda x: render(x, lexers.HtmlLexer),
"jinja": lambda x: render(x, lexers.DjangoLexer),
"json": lambda x: json_render(x, lexers.JsonLexer),
"md": wrapped_markdown,
"mysql": lambda x: render(x, lexers.MySqlLexer),
"postgresql": lambda x: render(x, lexers.PostgresLexer),
"powershell": lambda x: render(x, lexers.PowerShellLexer),
"py": lambda x: render(x, lexers.PythonLexer, get_python_source),
"python_callable": lambda x: render(x, lexers.PythonLexer, get_python_source),
"rst": lambda x: render(x, lexers.RstLexer),
"sql": lambda x: render(x, lexers.SqlLexer),
"tsql": lambda x: render(x, lexers.TransactSqlLexer),
"yaml": lambda x: render(x, lexers.YamlLexer),
}
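# --- Illustrative sketch, not part of the original module ---
# Looking up a renderer by a template-field renderer key and highlighting a
# rendered value for display; the key and sample command are illustrative.
def _example_attr_renderer():
    renderers = get_attr_renderer()
    highlighted = renderers["bash_command"]("echo 'hello from the UI'")
    assert "highlight" in highlighted  # Pygments wraps output in highlight markup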
def get_chart_height(dag):
"""
We use the number of tasks in the DAG as a heuristic to
    approximate the size of the generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
TODO(aoen): See [AIRFLOW-1263].
"""
return 600 + len(dag.tasks) * 10
class UtcAwareFilterMixin:
"""Mixin for filter for UTC time."""
def apply(self, query, value):
"""Apply the filter."""
if isinstance(value, str) and not value.strip():
value = None
else:
value = timezone.parse(value, timezone=timezone.utc)
return super().apply(query, value)
class FilterIsNull(BaseFilter):
"""Is null filter."""
name = lazy_gettext("Is Null")
arg_name = "emp"
def apply(self, query, value):
query, field = get_field_setup_query(query, self.model, self.column_name)
value = set_value_to_type(self.datamodel, self.column_name, None)
return query.filter(field == value)
class FilterIsNotNull(BaseFilter):
"""Is not null filter."""
name = lazy_gettext("Is not Null")
arg_name = "nemp"
def apply(self, query, value):
query, field = get_field_setup_query(query, self.model, self.column_name)
value = set_value_to_type(self.datamodel, self.column_name, None)
return query.filter(field != value)
class FilterGreaterOrEqual(BaseFilter):
"""Greater than or Equal filter."""
name = lazy_gettext("Greater than or Equal")
arg_name = "gte"
def apply(self, query, value):
query, field = get_field_setup_query(query, self.model, self.column_name)
value = set_value_to_type(self.datamodel, self.column_name, value)
if value is None:
return query
return query.filter(field >= value)
class FilterSmallerOrEqual(BaseFilter):
"""Smaller than or Equal filter."""
name = lazy_gettext("Smaller than or Equal")
arg_name = "lte"
def apply(self, query, value):
query, field = get_field_setup_query(query, self.model, self.column_name)
value = set_value_to_type(self.datamodel, self.column_name, value)
if value is None:
return query
return query.filter(field <= value)
class UtcAwareFilterSmallerOrEqual(UtcAwareFilterMixin, FilterSmallerOrEqual):
"""Smaller than or Equal filter for UTC time."""
class UtcAwareFilterGreaterOrEqual(UtcAwareFilterMixin, FilterGreaterOrEqual):
"""Greater than or Equal filter for UTC time."""
class UtcAwareFilterEqual(UtcAwareFilterMixin, fab_sqlafilters.FilterEqual):
"""Equality filter for UTC time."""
class UtcAwareFilterGreater(UtcAwareFilterMixin, fab_sqlafilters.FilterGreater):
"""Greater Than filter for UTC time."""
class UtcAwareFilterSmaller(UtcAwareFilterMixin, fab_sqlafilters.FilterSmaller):
"""Smaller Than filter for UTC time."""
class UtcAwareFilterNotEqual(UtcAwareFilterMixin, fab_sqlafilters.FilterNotEqual):
"""Not Equal To filter for UTC time."""
class UtcAwareFilterConverter(fab_sqlafilters.SQLAFilterConverter):
"""Retrieve conversion tables for UTC-Aware filters."""
class AirflowFilterConverter(fab_sqlafilters.SQLAFilterConverter):
"""Retrieve conversion tables for Airflow-specific filters."""
conversion_table = (
(
"is_utcdatetime",
[
UtcAwareFilterEqual,
UtcAwareFilterGreater,
UtcAwareFilterSmaller,
UtcAwareFilterNotEqual,
UtcAwareFilterSmallerOrEqual,
UtcAwareFilterGreaterOrEqual,
],
),
# FAB will try to create filters for extendedjson fields even though we
# exclude them from all UI, so we add this here to make it ignore them.
(
"is_extendedjson",
[],
),
) + fab_sqlafilters.SQLAFilterConverter.conversion_table
def __init__(self, datamodel):
super().__init__(datamodel)
for method, filters in self.conversion_table:
if FilterIsNull not in filters:
filters.append(FilterIsNull)
if FilterIsNotNull not in filters:
filters.append(FilterIsNotNull)
class CustomSQLAInterface(SQLAInterface):
"""
    FAB does not know how to handle columns with leading underscores because
    they are not supported by WTForms. This hack removes the leading
    '_' from the key to look up the column names.
"""
def __init__(self, obj, session: Session | None = None):
super().__init__(obj, session=session)
def clean_column_names():
if self.list_properties:
self.list_properties = {k.lstrip("_"): v for k, v in self.list_properties.items()}
if self.list_columns:
self.list_columns = {k.lstrip("_"): v for k, v in self.list_columns.items()}
clean_column_names()
# Support for AssociationProxy in search and list columns
for obj_attr, desc in self.obj.__mapper__.all_orm_descriptors.items():
if not isinstance(desc, AssociationProxy):
continue
proxy_instance = getattr(self.obj, obj_attr)
if hasattr(proxy_instance.remote_attr.prop, "columns"):
self.list_columns[obj_attr] = proxy_instance.remote_attr.prop.columns[0]
self.list_properties[obj_attr] = proxy_instance.remote_attr.prop
def is_utcdatetime(self, col_name):
"""Check if the datetime is a UTC one."""
from airflow.utils.sqlalchemy import UtcDateTime
if col_name in self.list_columns:
obj = self.list_columns[col_name].type
return (
isinstance(obj, UtcDateTime)
or isinstance(obj, types.TypeDecorator)
and isinstance(obj.impl, UtcDateTime)
)
return False
def is_extendedjson(self, col_name):
"""Checks if it is a special extended JSON type."""
from airflow.utils.sqlalchemy import ExtendedJSON
if col_name in self.list_columns:
obj = self.list_columns[col_name].type
return (
isinstance(obj, ExtendedJSON)
or isinstance(obj, types.TypeDecorator)
and isinstance(obj.impl, ExtendedJSON)
)
return False
def get_col_default(self, col_name: str) -> Any:
if col_name not in self.list_columns:
# Handle AssociationProxy etc, or anything that isn't a "real" column
return None
return super().get_col_default(col_name)
filter_converter_class = AirflowFilterConverter
class DagRunCustomSQLAInterface(CustomSQLAInterface):
"""Custom interface to allow faster deletion.
The ``delete`` and ``delete_all`` methods are overridden to speed up
deletion when a DAG run has a lot of related task instances. Relying on
SQLAlchemy's cascading deletion is comparatively slow in this situation.
"""
def delete(self, item: Model, raise_exception: bool = False) -> bool:
self.session.execute(delete(TaskInstance).where(TaskInstance.run_id == item.run_id))
return super().delete(item, raise_exception=raise_exception)
def delete_all(self, items: list[Model]) -> bool:
self.session.execute(
delete(TaskInstance).where(TaskInstance.run_id.in_(item.run_id for item in items))
)
return super().delete_all(items)
# This class is used directly (i.e. we can't tell Fab to use a different
# subclass) so we have no other option than to edit the conversion table in
# place
FieldConverter.conversion_table = (
("is_utcdatetime", DateTimeWithTimezoneField, AirflowDateTimePickerWidget),
) + FieldConverter.conversion_table
class UIAlert:
"""
    Helper for alert messages shown on the UI.
:param message: The message to display, either a string or Markup
:param category: The category of the message, one of "info", "warning", "error", or any custom category.
Defaults to "info".
:param roles: List of roles that should be shown the message. If ``None``, show to all users.
:param html: Whether the message has safe html markup in it. Defaults to False.
For example, show a message to all users:
.. code-block:: python
UIAlert("Welcome to Airflow")
Or only for users with the User role:
.. code-block:: python
UIAlert("Airflow update happening next week", roles=["User"])
You can also pass html in the message:
.. code-block:: python
UIAlert('Visit <a href="https://airflow.apache.org">airflow.apache.org</a>', html=True)
# or safely escape part of the message
# (more details: https://markupsafe.palletsprojects.com/en/2.0.x/formatting/)
UIAlert(Markup("Welcome <em>%s</em>") % ("John & Jane Doe",))
"""
def __init__(
self,
message: str | Markup,
category: str = "info",
roles: list[str] | None = None,
html: bool = False,
):
self.category = category
self.roles = roles
self.html = html
self.message = Markup(message) if html else message
def should_show(self, securitymanager: SecurityManager) -> bool:
"""Determine if the user should see the message.
The decision is based on the user's role. If ``AUTH_ROLE_PUBLIC`` is
        set in ``webserver_config.py``, an anonymous user would have the
``AUTH_ROLE_PUBLIC`` role.
"""
if self.roles:
current_user = securitymanager.current_user
if current_user is not None:
user_roles = {r.name for r in securitymanager.current_user.roles}
elif "AUTH_ROLE_PUBLIC" in securitymanager.appbuilder.get_app.config:
# If the current_user is anonymous, assign AUTH_ROLE_PUBLIC role (if it exists) to them
user_roles = {securitymanager.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]}
else:
# Unable to obtain user role - default to not showing
return False
if user_roles.isdisjoint(self.roles):
return False
return True
| 32,481 | 33.851931 | 109 | py |
airflow | airflow-main/airflow/www/security.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Any, Collection, Container, Iterable, Sequence
from flask import g
from sqlalchemy import or_
from sqlalchemy.orm import Session, joinedload
from airflow.configuration import auth_manager
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.fab_security.sqla.models import Permission, Resource, Role, User
from airflow.www.fab_security.views import (
ActionModelView,
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomRoleModelView,
CustomUserDBModelView,
CustomUserInfoEditView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
CustomUserStatsChartView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.www.utils import CustomSQLAInterface
EXISTING_ROLES = {
"Admin",
"Viewer",
"User",
"Op",
"Public",
}
class AirflowSecurityManager(SecurityManager, LoggingMixin):
"""Custom security manager, which introduces a permission model adapted to Airflow."""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global resource for dag-level access
DAG_RESOURCES = {permissions.RESOURCE_DAG}
DAG_ACTIONS = permissions.DAG_ACTIONS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS: list[dict[str, Any]] = [
{"role": "Public", "perms": []},
{"role": "Viewer", "perms": VIEWER_PERMISSIONS},
{
"role": "User",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
"role": "Op",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
"role": "Admin",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
actionmodelview = ActionModelView
permissionmodelview = PermissionPairModelView
rolemodelview = CustomRoleModelView
resourcemodelview = ResourceModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder) -> None:
super().__init__(appbuilder)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith("view"):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, "datamodel", None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def create_db(self) -> None:
if not self.appbuilder.update_perms:
self.log.debug("Skipping db since appbuilder disables update_perms")
return
super().create_db()
def _get_root_dag_id(self, dag_id: str) -> str:
if "." in dag_id:
dm = (
self.appbuilder.get_session.query(DagModel.dag_id, DagModel.root_dag_id)
.filter(DagModel.dag_id == dag_id)
.first()
)
return dm.root_dag_id or dm.dag_id
return dag_id
def init_role(self, role_name, perms) -> None:
"""
Initialize the role with actions and related resources.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.bulk_sync_roles([{"role": role_name, "perms": perms}])
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config["role"]
perms = config["perms"]
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
action_name, resource_name
)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
def delete_role(self, role_name: str) -> None:
"""
Delete the given Role.
:param role_name: the name of a role in the ab_role table
"""
session = self.appbuilder.get_session
role = session.query(Role).filter(Role.name == role_name).first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException(f"Role named '{role_name}' does not exist")
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
@provide_session
def get_accessible_dags(
self,
user_actions: Container[str] | None,
user,
session: Session = NEW_SESSION,
) -> Iterable[DagModel]:
warnings.warn(
"`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=3,
)
dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
return session.query(DagModel).filter(DagModel.dag_id.in_(dag_ids))
def get_readable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(
self,
user,
user_actions: Container[str] | None = None,
session: Session = NEW_SESSION,
) -> set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
if not auth_manager.is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (
permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)
):
return {dag.dag_id for dag in session.query(DagModel.dag_id)}
user_query = (
session.query(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(Permission.action), joinedload(Permission.resource))
)
.filter(User.id == user.id)
.first()
)
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.query(DagModel.dag_id)}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return {dag.dag_id for dag in session.query(DagModel.dag_id).filter(DagModel.dag_id.in_(resources))}
def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != "~":
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) -> str:
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) -> bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
"""
Verify whether a given user could perform a certain action
        (e.g. can_read, can_write, can_delete) on the given resource.
        :param action_name: action_name on resource (e.g. can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: user name
        :return: Whether user could perform certain action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
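    # --- Illustrative sketch, not part of the original class ---
    # A hypothetical convenience wrapper showing how has_access is typically
    # combined with resource_name_for_dag; the method name and the checked
    # actions are assumptions made purely for illustration.
    def _example_can_view_and_trigger(self, dag_id: str, user=None) -> bool:
        dag_resource_name = permissions.resource_name_for_dag(self._get_root_dag_id(dag_id))
        return self.has_access(
            permissions.ACTION_CAN_READ, dag_resource_name, user=user
        ) and self.has_access(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN, user=user)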
def _has_role(self, role_name_or_list: Container, user) -> bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) -> bool:
"""
        Has access to all DAGs in any of these 3 cases:
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return (
self._has_role(["Admin", "Viewer", "Op", "User"], user)
or self.can_read_all_dags(user)
or self.can_edit_all_dags(user)
)
def can_edit_all_dags(self, user=None) -> bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) -> bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
def clean_perms(self) -> None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug("Cleaning faulty perms")
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(
or_(
Permission.action == None, # noqa
Permission.resource == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info("Deleted %s faulty permissions", deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) -> None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
        It will also add the related entries to the ab_permission and ab_resource meta tables.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = (
self.appbuilder.get_session.query(self.permission_model)
.filter_by(action=action, resource=resource)
.first()
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) -> None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) -> set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(
self.appbuilder.get_session.query(self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.with_entities(self.action_model.name, self.resource_model.name)
.all()
)
def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
"""
        Return a dict keyed by (action_name, resource_name) with the permission as value,
        covering all permissions except those that are for specific DAGs.
"""
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.appbuilder.get_session.query(self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.filter(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
.with_entities(self.action_model.name, self.resource_model.name, self.permission_model)
.all()
)
}
def _get_all_roles_with_permissions(self) -> dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {
r.name: r
for r in self.appbuilder.get_session.query(self.role_model).options(
joinedload(self.role_model.permissions)
)
}
def create_dag_specific_permissions(self) -> None:
"""
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) -> None:
"""
        Admin should have all the permissions, except the per-DAG permissions,
        because Admin already has the all-DAGs permission.
        Add the missing ones to the table for Admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.query(Resource).filter(
Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%")
)
resource_ids = [resource.id for resource in dag_resources]
perms = session.query(Permission).filter(~Permission.resource_id.in_(resource_ids))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role("Admin")
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) -> None:
"""
        1. Init the default roles (Admin, Viewer, User, Op, public)
           with related permissions.
        2. Init the custom roles (e.g. dag-user) with related permissions.
:return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
# Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
        # init existing roles; the remaining roles can be created through the UI.
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(
self,
dag_id: str,
access_control: dict[str, Collection[str]] | None = None,
) -> None:
"""
        Sync permissions for the given dag id. The dag id surely exists in our dag bag
        as only the index ("/") view, the refresh button, or DagBag will call this function.
        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a role name and
            each value is a set() of action names (e.g. {'can_read'})
        :return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
def _revoke_all_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != "Admin"]
for role in non_admin_roles:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.action,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
if access_control:
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_all_stale_permissions(resource)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) -> Permission | None:
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != "Admin"]
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.action,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named "
f"'{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes "
f"the following invalid permissions: {invalid_action_names}; "
f"The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
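    # --- Editor's illustrative note (added comment, not part of the original class) ---
    # Shape of the ``access_control`` mapping consumed by the two methods above; the role name
    # and actions are examples only. Keys are role names, values are subsets of DAG_ACTIONS:
    #     access_control = {"data-team": {"can_read", "can_edit"}}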
def create_perm_vm_for_all_dag(self) -> None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(
self,
perms: Sequence[tuple[str, str]] | None = None,
dag_id: str | None = None,
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
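# --- Editor's illustrative sketch (added example, not part of the original module) ---
# How a caller might combine the checks above: decide whether the current user may edit a given
# DAG, falling back from the global DAG resource to the DAG-specific one. The helper name and the
# way the security manager instance is obtained are assumptions for illustration only.
def _example_can_edit_dag(security_manager: AirflowSecurityManager, dag_id: str) -> bool:
    # check_authorization() first tries the global (can_edit, DAGs) permission and, failing that,
    # the DAG-specific resource derived from dag_id via can_access_some_dags().
    return security_manager.check_authorization(
        perms=[(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)],
        dag_id=dag_id,
    )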
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: Session | None = None) -> None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: Session | None = None):
self.appbuilder = FakeAppBuilder(session)
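# --- Editor's illustrative sketch (added example, not part of the original module) ---
# The "appless" manager lets non-web code query FAB permissions with nothing but an ORM session.
# The helper below is hypothetical and assumes the caller already holds a SQLAlchemy session
# (e.g. one obtained via airflow.utils.session.create_session).
def _example_all_dag_read_permission_exists(session) -> bool:
    security_manager = ApplessAirflowSecurityManager(session=session)
    all_perms = security_manager.get_all_permissions()
    return (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG) in all_perms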
| 33,819 | 41.540881 | 110 | py |
airflow | airflow-main/airflow/www/session.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask import request
from flask.sessions import SecureCookieSessionInterface
from flask_session.sessions import SqlAlchemySessionInterface
class SessionExemptMixin:
    """Exempt certain blueprints/paths from autogenerated sessions."""
    def save_session(self, *args, **kwargs):
        """Prevent creating session from REST API and health requests."""
        if request.blueprint == "/api/v1":
            return None
        if request.path == "/health":
            return None
        return super().save_session(*args, **kwargs)
class AirflowDatabaseSessionInterface(SessionExemptMixin, SqlAlchemySessionInterface):
    """Session interface that exempts some routes and stores session data in the database."""
class AirflowSecureCookieSessionInterface(SessionExemptMixin, SecureCookieSessionInterface):
"""Session interface that exempts some routes and stores session data in a signed cookie."""
| 1,744 | 40.547619 | 96 | py |
airflow | airflow-main/airflow/www/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/www/forms.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import operator
from datetime import datetime as dt
from typing import Iterator
import pendulum
from flask_appbuilder.fieldwidgets import (
BS3PasswordFieldWidget,
BS3TextAreaFieldWidget,
BS3TextFieldWidget,
Select2Widget,
)
from flask_appbuilder.forms import DynamicForm
from flask_babel import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import widgets
from wtforms.fields import Field, IntegerField, PasswordField, SelectField, StringField, TextAreaField
from wtforms.validators import InputRequired, Optional
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.providers_manager import ProvidersManager
from airflow.utils import timezone
from airflow.utils.types import DagRunType
from airflow.www.validators import ValidKey
from airflow.www.widgets import (
AirflowDateTimePickerROWidget,
AirflowDateTimePickerWidget,
BS3TextAreaROWidget,
BS3TextFieldROWidget,
)
class DateTimeWithTimezoneField(Field):
"""A text field which stores a `datetime.datetime` matching a format."""
widget = widgets.TextInput()
def __init__(self, label=None, validators=None, datetime_format="%Y-%m-%d %H:%M:%S%Z", **kwargs):
super().__init__(label, validators, **kwargs)
self.format = datetime_format
self.data = None
def _value(self):
if self.raw_data:
return " ".join(self.raw_data)
if self.data:
return self.data.strftime(self.format)
return ""
def process_formdata(self, valuelist):
if not valuelist:
return
date_str = " ".join(valuelist)
try:
# Check if the datetime string is in the format without timezone, if so convert it to the
# default timezone
if len(date_str) == 19:
parsed_datetime = dt.strptime(date_str, "%Y-%m-%d %H:%M:%S")
default_timezone = self._get_default_timezone()
self.data = default_timezone.convert(parsed_datetime)
else:
self.data = pendulum.parse(date_str)
except ValueError:
self.data = None
raise ValueError(self.gettext("Not a valid datetime value"))
def _get_default_timezone(self):
current_timezone = conf.get("core", "default_timezone")
if current_timezone == "system":
default_timezone = pendulum.local_timezone()
else:
default_timezone = pendulum.timezone(current_timezone)
return default_timezone
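# --- Editor's illustrative sketch (added example, not part of the original module) ---
# What the parsing rules above mean in practice: a bare 19-character timestamp is localized to
# the default timezone (UTC stands in for the configured value here), while a value that already
# carries an offset is parsed as-is by pendulum. The sample formats are examples only.
def _example_parse_datetime(value: str):
    if len(value) == 19:  # "YYYY-MM-DD HH:MM:SS" without timezone information
        return pendulum.timezone("UTC").convert(dt.strptime(value, "%Y-%m-%d %H:%M:%S"))
    return pendulum.parse(value)  # e.g. "2021-01-01 00:00:00+02:00"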
class DateTimeForm(FlaskForm):
"""Date filter form needed for task views."""
execution_date = DateTimeWithTimezoneField("Logical date", widget=AirflowDateTimePickerWidget())
class DateTimeWithNumRunsForm(FlaskForm):
"""
Date time and number of runs form for tree view, task duration
and landing times.
"""
base_date = DateTimeWithTimezoneField(
"Anchor date", widget=AirflowDateTimePickerWidget(), default=timezone.utcnow()
)
num_runs = SelectField(
"Number of runs",
default=25,
choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
),
)
class DateTimeWithNumRunsWithDagRunsForm(DateTimeWithNumRunsForm):
"""Date time and number of runs and dag runs form for graph and gantt view."""
execution_date = SelectField("DAG run")
class DagRunEditForm(DynamicForm):
"""Form for editing DAG Run.
We don't actually want to allow editing, so everything is read-only here.
"""
dag_id = StringField(lazy_gettext("Dag Id"), widget=BS3TextFieldROWidget())
start_date = DateTimeWithTimezoneField(lazy_gettext("Start Date"), widget=AirflowDateTimePickerROWidget())
end_date = DateTimeWithTimezoneField(lazy_gettext("End Date"), widget=AirflowDateTimePickerROWidget())
run_id = StringField(lazy_gettext("Run Id"), widget=BS3TextFieldROWidget())
state = StringField(lazy_gettext("State"), widget=BS3TextFieldROWidget())
execution_date = DateTimeWithTimezoneField(
lazy_gettext("Logical Date"),
widget=AirflowDateTimePickerROWidget(),
)
conf = TextAreaField(lazy_gettext("Conf"), widget=BS3TextAreaROWidget())
note = TextAreaField(lazy_gettext("User Note"), widget=BS3TextAreaFieldWidget())
def populate_obj(self, item):
"""Populates the attributes of the passed obj with data from the form's fields."""
super().populate_obj(item)
item.run_type = DagRunType.from_run_id(item.run_id)
if item.conf:
item.conf = json.loads(item.conf)
class TaskInstanceEditForm(DynamicForm):
"""Form for editing TaskInstance."""
dag_id = StringField(lazy_gettext("Dag Id"), validators=[InputRequired()], widget=BS3TextFieldROWidget())
task_id = StringField(
lazy_gettext("Task Id"), validators=[InputRequired()], widget=BS3TextFieldROWidget()
)
start_date = DateTimeWithTimezoneField(lazy_gettext("Start Date"), widget=AirflowDateTimePickerROWidget())
end_date = DateTimeWithTimezoneField(lazy_gettext("End Date"), widget=AirflowDateTimePickerROWidget())
state = SelectField(
lazy_gettext("State"),
choices=(
("success", "success"),
("running", "running"),
("failed", "failed"),
("up_for_retry", "up_for_retry"),
),
widget=Select2Widget(),
validators=[InputRequired()],
)
execution_date = DateTimeWithTimezoneField(
lazy_gettext("Logical Date"),
widget=AirflowDateTimePickerROWidget(),
validators=[InputRequired()],
)
note = TextAreaField(lazy_gettext("User Note"), widget=BS3TextAreaFieldWidget())
@cache
def create_connection_form_class() -> type[DynamicForm]:
"""Create a form class for editing and adding Connection.
This class is created dynamically because it relies heavily on run-time
provider discovery, which slows down webserver startup a lot.
By creating the class at runtime, we can delay loading the providers until
when the connection form is first used, which may as well be never for a
short-lived server.
"""
providers_manager = ProvidersManager()
def _iter_connection_types() -> Iterator[tuple[str, str]]:
"""List available connection types."""
yield ("email", "Email")
yield ("fs", "File (path)")
yield ("generic", "Generic")
yield ("mesos_framework-id", "Mesos Framework ID")
for connection_type, provider_info in providers_manager.hooks.items():
if provider_info:
yield (connection_type, provider_info.hook_name)
class ConnectionForm(DynamicForm):
def process(self, formdata=None, obj=None, **kwargs):
super().process(formdata=formdata, obj=obj, **kwargs)
for field in self._fields.values():
if isinstance(getattr(field, "data", None), str):
field.data = field.data.strip()
conn_id = StringField(
lazy_gettext("Connection Id"),
validators=[InputRequired(), ValidKey()],
widget=BS3TextFieldWidget(),
)
conn_type = SelectField(
lazy_gettext("Connection Type"),
choices=sorted(_iter_connection_types(), key=operator.itemgetter(1)),
widget=Select2Widget(),
validators=[InputRequired()],
description=(
"Connection Type missing? Make sure you've installed the "
"corresponding Airflow Provider Package."
),
)
description = StringField(lazy_gettext("Description"), widget=BS3TextAreaFieldWidget())
host = StringField(lazy_gettext("Host"), widget=BS3TextFieldWidget())
schema = StringField(lazy_gettext("Schema"), widget=BS3TextFieldWidget())
login = StringField(lazy_gettext("Login"), widget=BS3TextFieldWidget())
password = PasswordField(lazy_gettext("Password"), widget=BS3PasswordFieldWidget())
port = IntegerField(lazy_gettext("Port"), validators=[Optional()], widget=BS3TextFieldWidget())
extra = TextAreaField(lazy_gettext("Extra"), widget=BS3TextAreaFieldWidget())
for key, value in providers_manager.connection_form_widgets.items():
setattr(ConnectionForm, key, value.field)
return ConnectionForm
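# --- Editor's illustrative sketch (added example, not part of the original module) ---
# Because the factory above is wrapped in @cache, provider discovery runs only on the first call
# and every later caller receives the exact same dynamically built class. Hypothetical helper:
def _example_connection_form_class() -> type[DynamicForm]:
    form_class = create_connection_form_class()
    assert create_connection_form_class() is form_class  # cached: one class object per process
    return form_class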
| 9,261 | 37.591667 | 110 | py |
airflow | airflow-main/airflow/www/blueprints.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask import Blueprint, redirect, url_for
routes = Blueprint("routes", __name__)
@routes.route("/")
def index():
"""Main Airflow page."""
return redirect(url_for("Airflow.index"))
| 1,019 | 34.172414 | 62 | py |
airflow | airflow-main/airflow/www/decorators.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
import gzip
import json
import logging
from io import BytesIO as IO
from itertools import chain
from typing import Callable, TypeVar, cast
import pendulum
from flask import after_this_request, g, request
from pendulum.parsing.exceptions import ParserError
from airflow.configuration import auth_manager
from airflow.models import Log
from airflow.utils.log import secrets_masker
from airflow.utils.session import create_session
T = TypeVar("T", bound=Callable)
logger = logging.getLogger(__name__)
def _mask_variable_fields(extra_fields):
"""
    The variable request's values and args come in this form:
    [('key', 'key_content'), ('val', 'val_content'), ('description', 'description_content')]
So we need to mask the 'val_content' field if 'key_content' is in the mask list.
"""
result = []
keyname = None
for k, v in extra_fields:
if k == "key":
keyname = v
result.append((k, v))
elif keyname and k == "val":
x = secrets_masker.redact(v, keyname)
result.append((k, x))
keyname = None
else:
result.append((k, v))
return result
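# --- Editor's illustrative example (added comment, not part of the original module) ---
# Shape of the data handled above; the key/value pair is hypothetical. With a sensitive key such
# as "db_password", secrets_masker.redact() replaces the paired "val" entry (typically with "***"):
#     _mask_variable_fields([("key", "db_password"), ("val", "hunter2"), ("description", "demo")])
#     -> [("key", "db_password"), ("val", "***"), ("description", "demo")]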
def _mask_connection_fields(extra_fields):
"""Mask connection fields."""
result = []
for k, v in extra_fields:
if k == "extra":
try:
extra = json.loads(v)
extra = [(k, secrets_masker.redact(v, k)) for k, v in extra.items()]
result.append((k, json.dumps(dict(extra))))
except json.JSONDecodeError:
result.append((k, "Encountered non-JSON in `extra` field"))
else:
result.append((k, secrets_masker.redact(v, k)))
return result
def action_logging(func: Callable | None = None, event: str | None = None) -> Callable[[T], T]:
"""Decorator to log user actions."""
def log_action(f: T) -> T:
@functools.wraps(f)
def wrapper(*args, **kwargs):
__tracebackhide__ = True # Hide from pytest traceback.
with create_session() as session:
if not auth_manager.is_logged_in():
user = "anonymous"
else:
user = f"{g.user.username} ({g.user.get_full_name()})"
fields_skip_logging = {"csrf_token", "_csrf_token"}
extra_fields = [
(k, secrets_masker.redact(v, k))
for k, v in chain(request.values.items(multi=True), request.view_args.items())
if k not in fields_skip_logging
]
if event and event.startswith("variable."):
extra_fields = _mask_variable_fields(extra_fields)
if event and event.startswith("connection."):
extra_fields = _mask_connection_fields(extra_fields)
params = {k: v for k, v in chain(request.values.items(), request.view_args.items())}
log = Log(
event=event or f.__name__,
task_instance=None,
owner=user,
extra=str(extra_fields),
task_id=params.get("task_id"),
dag_id=params.get("dag_id"),
)
if "execution_date" in request.values:
execution_date_value = request.values.get("execution_date")
try:
log.execution_date = pendulum.parse(execution_date_value, strict=False)
except ParserError:
logger.exception(
"Failed to parse execution_date from the request: %s", execution_date_value
)
session.add(log)
return f(*args, **kwargs)
return cast(T, wrapper)
if func:
return log_action(func)
return log_action
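# --- Editor's illustrative sketch (added example, not part of the original module) ---
# The decorator supports both the bare form (@action_logging) and the parameterised form handled
# above. The view function and the event label below are hypothetical.
@action_logging(event="variable.create")
def _example_logged_view():
    # When called inside a request context, a Log row describing the call is written first.
    return "ok"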
def gzipped(f: T) -> T:
"""Decorator to make a view compressed."""
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get("Accept-Encoding", "")
if "gzip" not in accept_encoding.lower():
return response
response.direct_passthrough = False
if (
response.status_code < 200
or response.status_code >= 300
or "Content-Encoding" in response.headers
):
return response
gzip_buffer = IO()
gzip_file = gzip.GzipFile(mode="wb", fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers["Content-Encoding"] = "gzip"
response.headers["Vary"] = "Accept-Encoding"
response.headers["Content-Length"] = len(response.data)
return response
return f(*args, **kwargs)
return cast(T, view_func)
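# --- Editor's illustrative sketch (added example, not part of the original module) ---
# gzipped() only compresses when the client advertises gzip support and the response is a 2xx
# without an existing Content-Encoding header (see the checks above). Hypothetical view function:
@gzipped
def _example_compressible_view():
    return "a large payload worth compressing"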
| 5,862 | 33.488235 | 103 | py |
airflow | airflow-main/airflow/www/api/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/www/api/experimental/endpoints.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from functools import wraps
from typing import Callable, TypeVar, cast
from flask import Blueprint, Response, current_app, g, jsonify, request, url_for
from airflow import models
from airflow.api.common.experimental import delete_dag as delete, pool as pool_api, trigger_dag as trigger
from airflow.api.common.experimental.get_code import get_code
from airflow.api.common.experimental.get_dag_run_state import get_dag_run_state
from airflow.api.common.experimental.get_dag_runs import get_dag_runs
from airflow.api.common.experimental.get_lineage import get_lineage as get_lineage_api
from airflow.api.common.experimental.get_task import get_task
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException
from airflow.utils import timezone
from airflow.utils.docs import get_docs_url
from airflow.utils.strings import to_boolean
from airflow.version import version
log = logging.getLogger(__name__)
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
"""Decorator for functions that require authentication."""
@wraps(function)
def decorated(*args, **kwargs):
auth = current_app.api_auth[0]
return auth.requires_authentication(function)(*args, **kwargs)
return cast(T, decorated)
api_experimental = Blueprint("api_experimental", __name__)
def add_deprecation_headers(response: Response):
"""
Add `Deprecation HTTP Header Field
<https://tools.ietf.org/id/draft-dalal-deprecation-header-03.html>`__.
"""
response.headers["Deprecation"] = "true"
doc_url = get_docs_url("upgrading-to-2.html#migration-guide-from-experimental-api-to-stable-api-v1")
deprecation_link = f'<{doc_url}>; rel="deprecation"; type="text/html"'
if "link" in response.headers:
response.headers["Link"] += f", {deprecation_link}"
else:
response.headers["Link"] = f"{deprecation_link}"
return response
# This API is deprecated. We do not care too much about typing here
api_experimental.after_request(add_deprecation_headers) # type: ignore[arg-type]
@api_experimental.route("/dags/<string:dag_id>/dag_runs", methods=["POST"])
@requires_authentication
def trigger_dag(dag_id):
"""
Trigger a new dag run for a Dag with an execution date of now unless
specified in the data.
"""
data = request.get_json(force=True)
run_id = None
if "run_id" in data:
run_id = data["run_id"]
conf = None
if "conf" in data:
conf = data["conf"]
if not isinstance(conf, dict):
error_message = "Dag Run conf must be a dictionary object, other types are not supported"
log.error(error_message)
response = jsonify({"error": error_message})
response.status_code = 400
return response
execution_date = None
if "execution_date" in data and data["execution_date"] is not None:
execution_date = data["execution_date"]
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
log.error("Given execution date could not be identified as a date.")
error_message = (
f"Given execution date, {execution_date}, could not be identified as a date. "
f"Example date format: 2015-11-16T14:34:15+00:00"
)
response = jsonify({"error": error_message})
response.status_code = 400
return response
replace_microseconds = execution_date is None
if "replace_microseconds" in data:
replace_microseconds = to_boolean(data["replace_microseconds"])
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date, replace_microseconds)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
if getattr(g, "user", None):
log.info("User %s created %s", g.user, dr)
response = jsonify(
message=f"Created {dr}", execution_date=dr.execution_date.isoformat(), run_id=dr.run_id
)
return response
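# --- Editor's illustrative example (added constant, not part of the original module) ---
# Shape of a request body accepted by the deprecated endpoint above; every value is an example.
# When "execution_date" is omitted, microseconds are stripped from the generated date unless
# "replace_microseconds" is explicitly set to "false".
_EXAMPLE_TRIGGER_PAYLOAD = {
    "run_id": "manual__2021-01-01T00:00:00+00:00",
    "conf": {"param": "value"},
    "execution_date": "2021-01-01T00:00:00+00:00",
}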
@api_experimental.route("/dags/<string:dag_id>", methods=["DELETE"])
@requires_authentication
def delete_dag(dag_id):
"""Delete all DB records related to the specified Dag."""
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
return jsonify(message=f"Removed {count} record(s)", count=count)
@api_experimental.route("/dags/<string:dag_id>/dag_runs", methods=["GET"])
@requires_authentication
def dag_runs(dag_id):
"""
Returns a list of Dag Runs for a specific DAG ID.
:query param state: a query string parameter '?state=queued|running|success...'
:param dag_id: String identifier of a DAG
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
"""
try:
state = request.args.get("state")
dagruns = get_dag_runs(dag_id, state)
except AirflowException as err:
log.info(err)
response = jsonify(error=f"{err}")
response.status_code = 400
return response
return jsonify(dagruns)
@api_experimental.route("/test", methods=["GET"])
@requires_authentication
def test():
"""Test endpoint to check authentication."""
return jsonify(status="OK")
@api_experimental.route("/info", methods=["GET"])
@requires_authentication
def info():
"""Get Airflow Version."""
return jsonify(version=version)
@api_experimental.route("/dags/<string:dag_id>/code", methods=["GET"])
@requires_authentication
def get_dag_code(dag_id):
"""Return python code of a given dag_id."""
try:
return get_code(dag_id)
except AirflowException as err:
log.info(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
@api_experimental.route("/dags/<string:dag_id>/tasks/<string:task_id>", methods=["GET"])
@requires_authentication
def task_info(dag_id, task_id):
"""Returns a JSON with a task's public instance variables."""
try:
t_info = get_task(dag_id, task_id)
except AirflowException as err:
log.info(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v) for k, v in vars(t_info).items() if not k.startswith("_")}
return jsonify(fields)
# ToDo: Shouldn't this be a PUT method?
@api_experimental.route("/dags/<string:dag_id>/paused/<string:paused>", methods=["GET"])
@requires_authentication
def dag_paused(dag_id, paused):
"""(Un)pauses a dag."""
is_paused = bool(paused == "true")
models.DagModel.get_dagmodel(dag_id).set_is_paused(
is_paused=is_paused,
)
return jsonify({"response": "ok"})
@api_experimental.route("/dags/<string:dag_id>/paused", methods=["GET"])
@requires_authentication
def dag_is_paused(dag_id):
"""Get paused state of a dag."""
is_paused = models.DagModel.get_dagmodel(dag_id).is_paused
return jsonify({"is_paused": is_paused})
@api_experimental.route(
"/dags/<string:dag_id>/dag_runs/<string:execution_date>/tasks/<string:task_id>", methods=["GET"]
)
@requires_authentication
def task_instance_info(dag_id, execution_date, task_id):
"""
Returns a JSON with a task instance's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
"""
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
log.error("Given execution date could not be identified as a date.")
error_message = (
f"Given execution date, {execution_date}, could not be identified as a date. "
f"Example date format: 2015-11-16T14:34:15+00:00"
)
response = jsonify({"error": error_message})
response.status_code = 400
return response
try:
ti_info = get_task_instance(dag_id, task_id, execution_date)
except AirflowException as err:
log.info(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v) for k, v in vars(ti_info).items() if not k.startswith("_")}
return jsonify(fields)
@api_experimental.route("/dags/<string:dag_id>/dag_runs/<string:execution_date>", methods=["GET"])
@requires_authentication
def dag_run_status(dag_id, execution_date):
"""
Returns a JSON with a dag_run's public instance variables.
The format for the exec_date is expected to be
"YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
of course need to have been encoded for URL in the request.
"""
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
log.error("Given execution date could not be identified as a date.")
error_message = (
f"Given execution date, {execution_date}, could not be identified as a date. "
f"Example date format: 2015-11-16T14:34:15+00:00"
)
response = jsonify({"error": error_message})
response.status_code = 400
return response
try:
dr_info = get_dag_run_state(dag_id, execution_date)
except AirflowException as err:
log.info(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
return jsonify(dr_info)
@api_experimental.route("/latest_runs", methods=["GET"])
@requires_authentication
def latest_dag_runs():
"""Returns the latest DagRun for each DAG formatted for the UI."""
from airflow.models import DagRun
dagruns = DagRun.get_latest_runs()
payload = []
for dagrun in dagruns:
if dagrun.execution_date:
payload.append(
{
"dag_id": dagrun.dag_id,
"execution_date": dagrun.execution_date.isoformat(),
"start_date": ((dagrun.start_date or "") and dagrun.start_date.isoformat()),
"dag_run_url": url_for(
"Airflow.graph", dag_id=dagrun.dag_id, execution_date=dagrun.execution_date
),
}
)
return jsonify(items=payload) # old flask versions don't support jsonifying arrays
@api_experimental.route("/pools/<string:name>", methods=["GET"])
@requires_authentication
def get_pool(name):
"""Get pool by a given name."""
try:
pool = pool_api.get_pool(name=name)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json())
@api_experimental.route("/pools", methods=["GET"])
@requires_authentication
def get_pools():
"""Get all pools."""
try:
pools = pool_api.get_pools()
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
else:
return jsonify([p.to_json() for p in pools])
@api_experimental.route("/pools", methods=["POST"])
@requires_authentication
def create_pool():
"""Create a pool."""
params = request.get_json(force=True)
try:
pool = pool_api.create_pool(**params)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json())
@api_experimental.route("/pools/<string:name>", methods=["DELETE"])
@requires_authentication
def delete_pool(name):
"""Delete pool."""
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json())
@api_experimental.route("/lineage/<string:dag_id>/<string:execution_date>", methods=["GET"])
@requires_authentication
def get_lineage(dag_id: str, execution_date: str):
"""Get Lineage details for a DagRun."""
# Convert string datetime into actual datetime
try:
execution_dt = timezone.parse(execution_date)
except ValueError:
log.error("Given execution date could not be identified as a date.")
error_message = (
f"Given execution date, {execution_date}, could not be identified as a date. "
f"Example date format: 2015-11-16T14:34:15+00:00"
)
response = jsonify({"error": error_message})
response.status_code = 400
return response
try:
lineage = get_lineage_api(dag_id=dag_id, execution_date=execution_dt)
except AirflowException as err:
log.error(err)
response = jsonify(error=f"{err}")
response.status_code = err.status_code
return response
else:
return jsonify(lineage)
| 14,432 | 32.565116 | 106 | py |
airflow | airflow-main/airflow/www/api/experimental/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/www/fab_security/views.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask import request
from flask_appbuilder import expose
from flask_appbuilder.security.decorators import has_access
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
ResetMyPasswordView,
ResetPasswordView,
RoleModelView,
UserDBModelView,
UserInfoEditView,
UserLDAPModelView,
UserOAuthModelView,
UserOIDModelView,
UserRemoteUserModelView,
UserStatsChartView,
ViewMenuModelView,
)
from flask_babel import lazy_gettext
from airflow.security import permissions
class ActionModelView(PermissionModelView):
"""Customize permission names for FAB's builtin PermissionModelView."""
class_permission_name = permissions.RESOURCE_ACTION
route_base = "/actions"
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
]
list_title = lazy_gettext("List Actions")
show_title = lazy_gettext("Show Action")
add_title = lazy_gettext("Add Action")
edit_title = lazy_gettext("Edit Action")
label_columns = {"name": lazy_gettext("Name")}
class PermissionPairModelView(PermissionViewModelView):
"""Customize permission names for FAB's builtin PermissionViewModelView."""
class_permission_name = permissions.RESOURCE_PERMISSION
route_base = "/permissions"
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
]
list_title = lazy_gettext("List Permissions")
show_title = lazy_gettext("Show Permission")
add_title = lazy_gettext("Add Permission")
edit_title = lazy_gettext("Edit Permission")
label_columns = {
"action": lazy_gettext("Action"),
"resource": lazy_gettext("Resource"),
}
list_columns = ["action", "resource"]
class CustomResetMyPasswordView(ResetMyPasswordView):
"""Customize permission names for FAB's builtin ResetMyPasswordView."""
class_permission_name = permissions.RESOURCE_MY_PASSWORD
method_permission_name = {
"this_form_get": "read",
"this_form_post": "edit",
}
base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomResetPasswordView(ResetPasswordView):
"""Customize permission names for FAB's builtin ResetPasswordView."""
class_permission_name = permissions.RESOURCE_PASSWORD
method_permission_name = {
"this_form_get": "read",
"this_form_post": "edit",
}
base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomRoleModelView(RoleModelView):
"""Customize permission names for FAB's builtin RoleModelView."""
class_permission_name = permissions.RESOURCE_ROLE
method_permission_name = {
"delete": "delete",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"add": "create",
"copy_role": "create",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
class ResourceModelView(ViewMenuModelView):
"""Customize permission names for FAB's builtin ViewMenuModelView."""
class_permission_name = permissions.RESOURCE_RESOURCE
route_base = "/resources"
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
]
list_title = lazy_gettext("List Resources")
show_title = lazy_gettext("Show Resource")
add_title = lazy_gettext("Add Resource")
edit_title = lazy_gettext("Edit Resource")
label_columns = {"name": lazy_gettext("Name")}
class CustomUserInfoEditView(UserInfoEditView):
"""Customize permission names for FAB's builtin UserInfoEditView."""
class_permission_name = permissions.RESOURCE_MY_PROFILE
route_base = "/userinfoeditview"
method_permission_name = {
"this_form_get": "edit",
"this_form_post": "edit",
}
base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomUserStatsChartView(UserStatsChartView):
"""Customize permission names for FAB's builtin UserStatsChartView."""
class_permission_name = permissions.RESOURCE_USER_STATS_CHART
route_base = "/userstatschartview"
method_permission_name = {
"chart": "read",
"list": "read",
}
base_permissions = [permissions.ACTION_CAN_READ]
class MultiResourceUserMixin:
"""Remaps UserModelView permissions to new resources and actions."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"userinfo": "read",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"userinfoedit": "edit",
"delete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
@property
def class_permission_name(self):
"""Returns appropriate permission name depending on request method name."""
if request:
action_name = request.view_args.get("name")
_, method_name = request.url_rule.endpoint.rsplit(".", 1)
if method_name == "action" and action_name:
return self.class_permission_name_mapping.get(action_name, self._class_permission_name)
if method_name:
return self.class_permission_name_mapping.get(method_name, self._class_permission_name)
return self._class_permission_name
@class_permission_name.setter
def class_permission_name(self, name):
self._class_permission_name = name
@expose("/show/<pk>", methods=["GET"])
@has_access
def show(self, pk):
pk = self._deserialize_pk_if_composite(pk)
widgets = self._show(pk)
widgets["show"].template_args["actions"].pop("userinfoedit", None)
return self.render_template(
self.show_template,
pk=pk,
title=self.show_title,
widgets=widgets,
related_views=self._related_views,
)
class CustomUserDBModelView(MultiResourceUserMixin, UserDBModelView):
"""Customize permission names for FAB's builtin UserDBModelView."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"resetmypassword": permissions.RESOURCE_MY_PASSWORD,
"resetpasswords": permissions.RESOURCE_PASSWORD,
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"add": "create",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"delete": "delete",
"resetmypassword": "read",
"resetpasswords": "read",
"userinfo": "read",
"userinfoedit": "read",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
class CustomUserLDAPModelView(MultiResourceUserMixin, UserLDAPModelView):
"""Customize permission names for FAB's builtin UserLDAPModelView."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"add": "create",
"userinfo": "read",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"userinfoedit": "edit",
"delete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
class CustomUserOAuthModelView(MultiResourceUserMixin, UserOAuthModelView):
"""Customize permission names for FAB's builtin UserOAuthModelView."""
class CustomUserOIDModelView(MultiResourceUserMixin, UserOIDModelView):
"""Customize permission names for FAB's builtin UserOIDModelView."""
class CustomUserRemoteUserModelView(MultiResourceUserMixin, UserRemoteUserModelView):
"""Customize permission names for FAB's builtin UserRemoteUserModelView."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"add": "create",
"userinfo": "read",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"userinfoedit": "edit",
"delete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
| 10,112 | 29.738602 | 103 | py |
airflow | airflow-main/airflow/www/fab_security/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/www/fab_security/manager.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# mypy: disable-error-code=var-annotated
from __future__ import annotations
import base64
import datetime
import json
import logging
from functools import cached_property
from typing import Any
from uuid import uuid4
import re2
from flask import Flask, current_app, g, session, url_for
from flask_appbuilder import AppBuilder
from flask_appbuilder.const import (
AUTH_DB,
AUTH_LDAP,
AUTH_OAUTH,
AUTH_OID,
AUTH_REMOTE_USER,
LOGMSG_ERR_SEC_ADD_REGISTER_USER,
LOGMSG_ERR_SEC_AUTH_LDAP,
LOGMSG_ERR_SEC_AUTH_LDAP_TLS,
LOGMSG_WAR_SEC_LOGIN_FAILED,
LOGMSG_WAR_SEC_NO_USER,
LOGMSG_WAR_SEC_NOLDAP_OBJ,
)
from flask_appbuilder.security.registerviews import (
RegisterUserDBView,
RegisterUserOAuthView,
RegisterUserOIDView,
)
from flask_appbuilder.security.views import (
AuthDBView,
AuthLDAPView,
AuthOAuthView,
AuthOIDView,
AuthRemoteUserView,
PermissionModelView,
RegisterUserModelView,
ResetMyPasswordView,
ResetPasswordView,
RoleModelView,
UserDBModelView,
UserInfoEditView,
UserLDAPModelView,
UserOAuthModelView,
UserOIDModelView,
UserRemoteUserModelView,
UserStatsChartView,
)
from flask_babel import lazy_gettext as _
from flask_jwt_extended import JWTManager, current_user as current_user_jwt
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_login import AnonymousUserMixin, LoginManager, current_user
from werkzeug.security import check_password_hash, generate_password_hash
from airflow.configuration import auth_manager, conf
from airflow.www.fab_security.sqla.models import Action, Permission, RegisterUser, Resource, Role, User
# This product contains a modified portion of 'Flask App Builder' developed by Daniel Vaz Gaspar.
# (https://github.com/dpgaspar/Flask-AppBuilder).
# Copyright 2013, Daniel Vaz Gaspar
log = logging.getLogger(__name__)
def _oauth_tokengetter(token=None):
"""
    Default function to return the current user's OAuth token
    from the session cookie.
"""
token = session.get("oauth")
log.debug("Token Get: %s", token)
return token
class AnonymousUser(AnonymousUserMixin):
"""User object used when no active user is logged in."""
_roles: set[tuple[str, str]] = set()
_perms: set[tuple[str, str]] = set()
@property
def roles(self):
if not self._roles:
public_role = current_app.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]
self._roles = {current_app.appbuilder.sm.find_role(public_role)} if public_role else set()
return self._roles
@roles.setter
def roles(self, roles):
self._roles = roles
self._perms = set()
@property
def perms(self):
if not self._perms:
self._perms = set()
for role in self.roles:
self._perms.update({(perm.action.name, perm.resource.name) for perm in role.permissions})
return self._perms
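# --- Editor's illustrative sketch (added example, not part of the original module) ---
# With AUTH_ROLE_PUBLIC configured, an anonymous visitor lazily picks up that role's permissions
# through the properties above; without it, both sets stay empty. The helper is hypothetical and
# needs an active Flask application context when called.
def _example_anonymous_can(action_name: str, resource_name: str) -> bool:
    return (action_name, resource_name) in AnonymousUser().perms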
class BaseSecurityManager:
"""Base class to define the Security Manager interface."""
appbuilder: AppBuilder
"""The appbuilder instance for the current security manager."""
auth_view = None
""" The obj instance for authentication view """
user_view = None
""" The obj instance for user view """
registeruser_view = None
""" The obj instance for registering user view """
lm = None
""" Flask-Login LoginManager """
jwt_manager = None
""" Flask-JWT-Extended """
oid = None
""" Flask-OpenID OpenID """
oauth = None
""" Flask-OAuth """
oauth_remotes: dict[str, Any]
""" OAuth email whitelists """
oauth_whitelists: dict[str, list] = {}
""" Initialized (remote_app) providers dict {'provider_name', OBJ } """
@staticmethod
def oauth_tokengetter(token=None):
"""Authentication (OAuth) token getter function.
Override to implement your own token getter method.
"""
return _oauth_tokengetter(token)
oauth_user_info = None
user_model: type[User]
""" Override to set your own User Model """
role_model: type[Role]
""" Override to set your own Role Model """
action_model: type[Action]
""" Override to set your own Action Model """
resource_model: type[Resource]
""" Override to set your own Resource Model """
permission_model: type[Permission]
""" Override to set your own Permission Model """
registeruser_model: type[RegisterUser]
""" Override to set your own RegisterUser Model """
userdbmodelview = UserDBModelView
""" Override if you want your own user db view """
userldapmodelview = UserLDAPModelView
""" Override if you want your own user ldap view """
useroidmodelview = UserOIDModelView
""" Override if you want your own user OID view """
useroauthmodelview = UserOAuthModelView
""" Override if you want your own user OAuth view """
userremoteusermodelview = UserRemoteUserModelView
""" Override if you want your own user REMOTE_USER view """
registerusermodelview = RegisterUserModelView
authdbview = AuthDBView
""" Override if you want your own Authentication DB view """
authldapview = AuthLDAPView
""" Override if you want your own Authentication LDAP view """
authoidview = AuthOIDView
""" Override if you want your own Authentication OID view """
authoauthview = AuthOAuthView
""" Override if you want your own Authentication OAuth view """
authremoteuserview = AuthRemoteUserView
""" Override if you want your own Authentication REMOTE_USER view """
registeruserdbview = RegisterUserDBView
""" Override if you want your own register user db view """
registeruseroidview = RegisterUserOIDView
""" Override if you want your own register user OpenID view """
registeruseroauthview = RegisterUserOAuthView
""" Override if you want your own register user OAuth view """
resetmypasswordview = ResetMyPasswordView
""" Override if you want your own reset my password view """
resetpasswordview = ResetPasswordView
""" Override if you want your own reset password view """
userinfoeditview = UserInfoEditView
""" Override if you want your own User information edit view """
rolemodelview = RoleModelView
actionmodelview = PermissionModelView
userstatschartview = UserStatsChartView
permissionmodelview = PermissionModelView
@cached_property
def resourcemodelview(self):
from airflow.www.views import ResourceModelView
return ResourceModelView
def __init__(self, appbuilder):
self.appbuilder = appbuilder
app = self.appbuilder.get_app
# Base Security Config
app.config.setdefault("AUTH_ROLE_ADMIN", "Admin")
app.config.setdefault("AUTH_ROLE_PUBLIC", "Public")
app.config.setdefault("AUTH_TYPE", AUTH_DB)
# Self Registration
app.config.setdefault("AUTH_USER_REGISTRATION", False)
app.config.setdefault("AUTH_USER_REGISTRATION_ROLE", self.auth_role_public)
app.config.setdefault("AUTH_USER_REGISTRATION_ROLE_JMESPATH", None)
# Role Mapping
app.config.setdefault("AUTH_ROLES_MAPPING", {})
app.config.setdefault("AUTH_ROLES_SYNC_AT_LOGIN", False)
app.config.setdefault("AUTH_API_LOGIN_ALLOW_MULTIPLE_PROVIDERS", False)
# LDAP Config
if self.auth_type == AUTH_LDAP:
if "AUTH_LDAP_SERVER" not in app.config:
raise Exception("No AUTH_LDAP_SERVER defined on config with AUTH_LDAP authentication type.")
app.config.setdefault("AUTH_LDAP_SEARCH", "")
app.config.setdefault("AUTH_LDAP_SEARCH_FILTER", "")
app.config.setdefault("AUTH_LDAP_APPEND_DOMAIN", "")
app.config.setdefault("AUTH_LDAP_USERNAME_FORMAT", "")
app.config.setdefault("AUTH_LDAP_BIND_USER", "")
app.config.setdefault("AUTH_LDAP_BIND_PASSWORD", "")
# TLS options
app.config.setdefault("AUTH_LDAP_USE_TLS", False)
app.config.setdefault("AUTH_LDAP_ALLOW_SELF_SIGNED", False)
app.config.setdefault("AUTH_LDAP_TLS_DEMAND", False)
app.config.setdefault("AUTH_LDAP_TLS_CACERTDIR", "")
app.config.setdefault("AUTH_LDAP_TLS_CACERTFILE", "")
app.config.setdefault("AUTH_LDAP_TLS_CERTFILE", "")
app.config.setdefault("AUTH_LDAP_TLS_KEYFILE", "")
# Mapping options
app.config.setdefault("AUTH_LDAP_UID_FIELD", "uid")
app.config.setdefault("AUTH_LDAP_GROUP_FIELD", "memberOf")
app.config.setdefault("AUTH_LDAP_FIRSTNAME_FIELD", "givenName")
app.config.setdefault("AUTH_LDAP_LASTNAME_FIELD", "sn")
app.config.setdefault("AUTH_LDAP_EMAIL_FIELD", "mail")
# Rate limiting
app.config.setdefault("AUTH_RATE_LIMITED", True)
app.config.setdefault("AUTH_RATE_LIMIT", "5 per 40 second")
if self.auth_type == AUTH_OID:
from flask_openid import OpenID
self.oid = OpenID(app)
if self.auth_type == AUTH_OAUTH:
from authlib.integrations.flask_client import OAuth
self.oauth = OAuth(app)
self.oauth_remotes = {}
for _provider in self.oauth_providers:
provider_name = _provider["name"]
log.debug("OAuth providers init %s", provider_name)
obj_provider = self.oauth.register(provider_name, **_provider["remote_app"])
obj_provider._tokengetter = self.oauth_tokengetter
if not self.oauth_user_info:
self.oauth_user_info = self.get_oauth_user_info
# Whitelist only users with matching emails
if "whitelist" in _provider:
self.oauth_whitelists[provider_name] = _provider["whitelist"]
self.oauth_remotes[provider_name] = obj_provider
self._builtin_roles = self.create_builtin_roles()
# Setup Flask-Login
self.lm = self.create_login_manager(app)
# Setup Flask-Jwt-Extended
self.jwt_manager = self.create_jwt_manager(app)
# Setup Flask-Limiter
self.limiter = self.create_limiter(app)
def create_limiter(self, app: Flask) -> Limiter:
limiter = Limiter(key_func=get_remote_address)
limiter.init_app(app)
return limiter
def create_login_manager(self, app) -> LoginManager:
"""
Override to implement your custom login manager instance.
:param app: Flask app
"""
lm = LoginManager(app)
lm.anonymous_user = AnonymousUser
lm.login_view = "login"
lm.user_loader(self.load_user)
return lm
def create_jwt_manager(self, app) -> JWTManager:
"""
Override to implement your custom JWT manager instance.
:param app: Flask app
"""
jwt_manager = JWTManager()
jwt_manager.init_app(app)
jwt_manager.user_lookup_loader(self.load_user_jwt)
return jwt_manager
def create_builtin_roles(self):
"""Returns FAB builtin roles."""
return self.appbuilder.get_app.config.get("FAB_ROLES", {})
def get_roles_from_keys(self, role_keys: list[str]) -> set[RoleModelView]:
"""
Construct a set of FAB role objects from a list of keys.
NOTE:
- keys are things like: "LDAP group DNs" or "OAUTH group names"
- we use AUTH_ROLES_MAPPING to map from keys to FAB role names
:param role_keys: the list of role keys to map
:return: a set of matching Role records
"""
_roles = set()
_role_keys = set(role_keys)
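# Illustrative AUTH_ROLES_MAPPING entry (hypothetical DN and role name):
# {"cn=airflow_admins,ou=groups,dc=example,dc=com": ["Admin"]} maps that LDAP group DN to the FAB "Admin" role.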
for role_key, fab_role_names in self.auth_roles_mapping.items():
if role_key in _role_keys:
for fab_role_name in fab_role_names:
fab_role = self.find_role(fab_role_name)
if fab_role:
_roles.add(fab_role)
else:
log.warning("Can't find role specified in AUTH_ROLES_MAPPING: %s", fab_role_name)
return _roles
@property
def auth_type_provider_name(self):
provider_to_auth_type = {AUTH_DB: "db", AUTH_LDAP: "ldap"}
return provider_to_auth_type.get(self.auth_type)
@property
def get_url_for_registeruser(self):
"""Gets the URL for Register User."""
return url_for(f"{self.registeruser_view.endpoint}.{self.registeruser_view.default_view}")
@property
def get_user_datamodel(self):
"""Gets the User data model."""
return self.user_view.datamodel
@property
def get_register_user_datamodel(self):
"""Gets the Register User data model."""
return self.registerusermodelview.datamodel
@property
def builtin_roles(self):
"""Get the builtin roles."""
return self._builtin_roles
@property
def api_login_allow_multiple_providers(self):
return self.appbuilder.get_app.config["AUTH_API_LOGIN_ALLOW_MULTIPLE_PROVIDERS"]
@property
def auth_type(self):
"""Get the auth type."""
return self.appbuilder.get_app.config["AUTH_TYPE"]
@property
def auth_username_ci(self):
"""Gets the auth username for CI."""
return self.appbuilder.get_app.config.get("AUTH_USERNAME_CI", True)
@property
def auth_role_admin(self):
"""Gets the admin role."""
return self.appbuilder.get_app.config["AUTH_ROLE_ADMIN"]
@property
def auth_role_public(self):
"""Gets the public role."""
return self.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]
@property
def auth_ldap_server(self):
"""Gets the LDAP server object."""
return self.appbuilder.get_app.config["AUTH_LDAP_SERVER"]
@property
def auth_ldap_use_tls(self):
"""Should LDAP use TLS."""
return self.appbuilder.get_app.config["AUTH_LDAP_USE_TLS"]
@property
def auth_user_registration(self):
"""Will user self registration be allowed."""
return self.appbuilder.get_app.config["AUTH_USER_REGISTRATION"]
@property
def auth_user_registration_role(self):
"""The default user self registration role."""
return self.appbuilder.get_app.config["AUTH_USER_REGISTRATION_ROLE"]
@property
def auth_user_registration_role_jmespath(self) -> str:
"""The JMESPATH role to use for user registration."""
return self.appbuilder.get_app.config["AUTH_USER_REGISTRATION_ROLE_JMESPATH"]
@property
def auth_roles_mapping(self) -> dict[str, list[str]]:
"""The mapping of auth roles."""
return self.appbuilder.get_app.config["AUTH_ROLES_MAPPING"]
@property
def auth_roles_sync_at_login(self) -> bool:
"""Should roles be synced at login."""
return self.appbuilder.get_app.config["AUTH_ROLES_SYNC_AT_LOGIN"]
@property
def auth_ldap_search(self):
"""LDAP search object."""
return self.appbuilder.get_app.config["AUTH_LDAP_SEARCH"]
@property
def auth_ldap_search_filter(self):
"""LDAP search filter."""
return self.appbuilder.get_app.config["AUTH_LDAP_SEARCH_FILTER"]
@property
def auth_ldap_bind_user(self):
"""LDAP bind user."""
return self.appbuilder.get_app.config["AUTH_LDAP_BIND_USER"]
@property
def auth_ldap_bind_password(self):
"""LDAP bind password."""
return self.appbuilder.get_app.config["AUTH_LDAP_BIND_PASSWORD"]
@property
def auth_ldap_append_domain(self):
"""LDAP append domain."""
return self.appbuilder.get_app.config["AUTH_LDAP_APPEND_DOMAIN"]
@property
def auth_ldap_username_format(self):
"""LDAP username format."""
return self.appbuilder.get_app.config["AUTH_LDAP_USERNAME_FORMAT"]
@property
def auth_ldap_uid_field(self):
"""LDAP UID field."""
return self.appbuilder.get_app.config["AUTH_LDAP_UID_FIELD"]
@property
def auth_ldap_group_field(self) -> str:
"""LDAP group field."""
return self.appbuilder.get_app.config["AUTH_LDAP_GROUP_FIELD"]
@property
def auth_ldap_firstname_field(self):
"""LDAP first name field."""
return self.appbuilder.get_app.config["AUTH_LDAP_FIRSTNAME_FIELD"]
@property
def auth_ldap_lastname_field(self):
"""LDAP last name field."""
return self.appbuilder.get_app.config["AUTH_LDAP_LASTNAME_FIELD"]
@property
def auth_ldap_email_field(self):
"""LDAP email field."""
return self.appbuilder.get_app.config["AUTH_LDAP_EMAIL_FIELD"]
@property
def auth_ldap_bind_first(self):
"""LDAP bind first."""
return self.appbuilder.get_app.config["AUTH_LDAP_BIND_FIRST"]
@property
def auth_ldap_allow_self_signed(self):
"""LDAP allow self signed."""
return self.appbuilder.get_app.config["AUTH_LDAP_ALLOW_SELF_SIGNED"]
@property
def auth_ldap_tls_demand(self):
"""LDAP TLS demand."""
return self.appbuilder.get_app.config["AUTH_LDAP_TLS_DEMAND"]
@property
def auth_ldap_tls_cacertdir(self):
"""LDAP TLS CA certificate directory."""
return self.appbuilder.get_app.config["AUTH_LDAP_TLS_CACERTDIR"]
@property
def auth_ldap_tls_cacertfile(self):
"""LDAP TLS CA certificate file."""
return self.appbuilder.get_app.config["AUTH_LDAP_TLS_CACERTFILE"]
@property
def auth_ldap_tls_certfile(self):
"""LDAP TLS certificate file."""
return self.appbuilder.get_app.config["AUTH_LDAP_TLS_CERTFILE"]
@property
def auth_ldap_tls_keyfile(self):
"""LDAP TLS key file."""
return self.appbuilder.get_app.config["AUTH_LDAP_TLS_KEYFILE"]
@property
def openid_providers(self):
"""Openid providers."""
return self.appbuilder.get_app.config["OPENID_PROVIDERS"]
@property
def oauth_providers(self):
"""Oauth providers."""
return self.appbuilder.get_app.config["OAUTH_PROVIDERS"]
@property
def is_auth_limited(self) -> bool:
return self.appbuilder.get_app.config["AUTH_RATE_LIMITED"]
@property
def auth_rate_limit(self) -> str:
return self.appbuilder.get_app.config["AUTH_RATE_LIMIT"]
@property
def current_user(self):
"""Current user object."""
if auth_manager.is_logged_in():
return g.user
elif current_user_jwt:
return current_user_jwt
def oauth_user_info_getter(self, f):
"""
Decorator to register the OAuth user info getter used for all providers.
The decorated function receives the provider name and the OAuth response and
must return a dict with the information obtained from the provider.
The returned user info dict should have its keys named the same
as the User Model columns.
Use it like this, an example for GitHub ::
@appbuilder.sm.oauth_user_info_getter
def my_oauth_user_info(sm, provider, response=None):
if provider == 'github':
me = sm.oauth_remotes[provider].get('user')
return {'username': me.data.get('login')}
else:
return {}
"""
def wraps(provider, response=None):
ret = f(self, provider, response=response)
# Checks if decorator is well behaved and returns a dict as supposed.
if not isinstance(ret, dict):
log.error("OAuth user info decorated function did not return a dict, but: %s", type(ret))
return {}
return ret
self.oauth_user_info = wraps
return wraps
def get_oauth_token_key_name(self, provider):
"""
Returns the token_key name for the oauth provider.
If none is configured, defaults to ``oauth_token``. This is configured
using ``OAUTH_PROVIDERS`` and the ``token_key`` key.
"""
for _provider in self.oauth_providers:
if _provider["name"] == provider:
return _provider.get("token_key", "oauth_token")
def get_oauth_token_secret_name(self, provider):
"""Gety the ``token_secret`` name for the oauth provider.
If none is configured, defaults to ``oauth_secret``. This is configured
using ``OAUTH_PROVIDERS`` and ``token_secret``.
"""
for _provider in self.oauth_providers:
if _provider["name"] == provider:
return _provider.get("token_secret", "oauth_token_secret")
def set_oauth_session(self, provider, oauth_response):
"""Set the current session with OAuth user secrets."""
# Get this provider key names for token_key and token_secret
token_key = self.appbuilder.sm.get_oauth_token_key_name(provider)
token_secret = self.appbuilder.sm.get_oauth_token_secret_name(provider)
# Save users token on encrypted session cookie
session["oauth"] = (
oauth_response[token_key],
oauth_response.get(token_secret, ""),
)
session["oauth_provider"] = provider
def get_oauth_user_info(self, provider, resp):
"""Get the OAuth user information from different OAuth APIs.
All providers have different ways to retrieve user info.
"""
# for GITHUB
if provider == "github" or provider == "githublocal":
me = self.appbuilder.sm.oauth_remotes[provider].get("user")
data = me.json()
log.debug("User info from GitHub: %s", data)
return {"username": "github_" + data.get("login")}
# for twitter
if provider == "twitter":
me = self.appbuilder.sm.oauth_remotes[provider].get("account/settings.json")
data = me.json()
log.debug("User info from Twitter: %s", data)
return {"username": "twitter_" + data.get("screen_name", "")}
# for linkedin
if provider == "linkedin":
me = self.appbuilder.sm.oauth_remotes[provider].get(
"people/~:(id,email-address,first-name,last-name)?format=json"
)
data = me.json()
log.debug("User info from LinkedIn: %s", data)
return {
"username": "linkedin_" + data.get("id", ""),
"email": data.get("email-address", ""),
"first_name": data.get("firstName", ""),
"last_name": data.get("lastName", ""),
}
# for Google
if provider == "google":
me = self.appbuilder.sm.oauth_remotes[provider].get("userinfo")
data = me.json()
log.debug("User info from Google: %s", data)
return {
"username": "google_" + data.get("id", ""),
"first_name": data.get("given_name", ""),
"last_name": data.get("family_name", ""),
"email": data.get("email", ""),
}
# for Azure AD Tenant. Azure OAuth response contains
# JWT token which has user info.
# JWT token needs to be base64 decoded.
# https://docs.microsoft.com/en-us/azure/active-directory/develop/
# active-directory-protocols-oauth-code
if provider == "azure":
log.debug("Azure response received : %s", resp)
id_token = resp["id_token"]
log.debug(str(id_token))
me = self._azure_jwt_token_parse(id_token)
log.debug("Parse JWT token : %s", me)
return {
"name": me.get("name", ""),
"email": me["upn"],
"first_name": me.get("given_name", ""),
"last_name": me.get("family_name", ""),
"id": me["oid"],
"username": me["oid"],
"role_keys": me.get("roles", []),
}
# for OpenShift
if provider == "openshift":
me = self.appbuilder.sm.oauth_remotes[provider].get("apis/user.openshift.io/v1/users/~")
data = me.json()
log.debug("User info from OpenShift: %s", data)
return {"username": "openshift_" + data.get("metadata").get("name")}
# for Okta
if provider == "okta":
me = self.appbuilder.sm.oauth_remotes[provider].get("userinfo")
data = me.json()
log.debug("User info from Okta: %s", data)
return {
"username": "okta_" + data.get("sub", ""),
"first_name": data.get("given_name", ""),
"last_name": data.get("family_name", ""),
"email": data.get("email", ""),
"role_keys": data.get("groups", []),
}
# for Keycloak
if provider in ["keycloak", "keycloak_before_17"]:
me = self.appbuilder.sm.oauth_remotes[provider].get("openid-connect/userinfo")
me.raise_for_status()
data = me.json()
log.debug("User info from Keycloak: %s", data)
return {
"username": data.get("preferred_username", ""),
"first_name": data.get("given_name", ""),
"last_name": data.get("family_name", ""),
"email": data.get("email", ""),
}
else:
return {}
def _azure_parse_jwt(self, id_token):
jwt_token_parts = r"^([^\.\s]*)\.([^\.\s]+)\.([^\.\s]*)$"
matches = re2.search(jwt_token_parts, id_token)
if not matches or len(matches.groups()) < 3:
log.error("Unable to parse token.")
return {}
return {
"header": matches.group(1),
"Payload": matches.group(2),
"Sig": matches.group(3),
}
def _azure_jwt_token_parse(self, id_token):
jwt_split_token = self._azure_parse_jwt(id_token)
if not jwt_split_token:
return
jwt_payload = jwt_split_token["Payload"]
# Prepare for base64 decoding
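# base64url-encoded JWT segments may omit padding; pad with "=" to a multiple of 4 so urlsafe_b64decode accepts them.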
payload_b64_string = jwt_payload
payload_b64_string += "=" * (4 - (len(jwt_payload) % 4))
decoded_payload = base64.urlsafe_b64decode(payload_b64_string.encode("ascii"))
if not decoded_payload:
log.error("Payload of id_token could not be base64 url decoded.")
return
jwt_decoded_payload = json.loads(decoded_payload.decode("utf-8"))
return jwt_decoded_payload
def register_views(self):
if not self.appbuilder.app.config.get("FAB_ADD_SECURITY_VIEWS", True):
return
if self.auth_user_registration:
if self.auth_type == AUTH_DB:
self.registeruser_view = self.registeruserdbview()
elif self.auth_type == AUTH_OID:
self.registeruser_view = self.registeruseroidview()
elif self.auth_type == AUTH_OAUTH:
self.registeruser_view = self.registeruseroauthview()
if self.registeruser_view:
self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.resetpasswordview())
self.appbuilder.add_view_no_menu(self.resetmypasswordview())
self.appbuilder.add_view_no_menu(self.userinfoeditview())
if self.auth_type == AUTH_DB:
self.user_view = self.userdbmodelview
self.auth_view = self.authdbview()
elif self.auth_type == AUTH_LDAP:
self.user_view = self.userldapmodelview
self.auth_view = self.authldapview()
elif self.auth_type == AUTH_OAUTH:
self.user_view = self.useroauthmodelview
self.auth_view = self.authoauthview()
elif self.auth_type == AUTH_REMOTE_USER:
self.user_view = self.userremoteusermodelview
self.auth_view = self.authremoteuserview()
else:
self.user_view = self.useroidmodelview
self.auth_view = self.authoidview()
if self.auth_user_registration:
pass
# self.registeruser_view = self.registeruseroidview()
# self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.auth_view)
# this needs to be done after the view is added, otherwise the blueprint
# is not initialized
if self.is_auth_limited:
self.limiter.limit(self.auth_rate_limit, methods=["POST"])(self.auth_view.blueprint)
self.user_view = self.appbuilder.add_view(
self.user_view,
"List Users",
icon="fa-user",
label=_("List Users"),
category="Security",
category_icon="fa-cogs",
category_label=_("Security"),
)
role_view = self.appbuilder.add_view(
self.rolemodelview,
"List Roles",
icon="fa-group",
label=_("List Roles"),
category="Security",
category_icon="fa-cogs",
)
role_view.related_views = [self.user_view.__class__]
if self.userstatschartview:
self.appbuilder.add_view(
self.userstatschartview,
"User's Statistics",
icon="fa-bar-chart-o",
label=_("User's Statistics"),
category="Security",
)
if self.auth_user_registration:
self.appbuilder.add_view(
self.registerusermodelview,
"User's Statistics",
icon="fa-user-plus",
label=_("User Registrations"),
category="Security",
)
self.appbuilder.menu.add_separator("Security")
if self.appbuilder.app.config.get("FAB_ADD_SECURITY_PERMISSION_VIEW", True):
self.appbuilder.add_view(
self.actionmodelview,
"Actions",
icon="fa-lock",
label=_("Actions"),
category="Security",
)
if self.appbuilder.app.config.get("FAB_ADD_SECURITY_VIEW_MENU_VIEW", True):
self.appbuilder.add_view(
self.resourcemodelview,
"Resources",
icon="fa-list-alt",
label=_("Resources"),
category="Security",
)
if self.appbuilder.app.config.get("FAB_ADD_SECURITY_PERMISSION_VIEWS_VIEW", True):
self.appbuilder.add_view(
self.permissionmodelview,
"Permission Pairs",
icon="fa-link",
label=_("Permissions"),
category="Security",
)
def create_db(self):
"""Setups the DB, creates admin and public roles if they don't exist."""
roles_mapping = self.appbuilder.get_app.config.get("FAB_ROLES_MAPPING", {})
for pk, name in roles_mapping.items():
self.update_role(pk, name)
for role_name in self.builtin_roles:
self.add_role(role_name)
if self.auth_role_admin not in self.builtin_roles:
self.add_role(self.auth_role_admin)
self.add_role(self.auth_role_public)
if self.count_users() == 0 and self.auth_role_public != self.auth_role_admin:
log.warning(LOGMSG_WAR_SEC_NO_USER)
def reset_password(self, userid, password):
"""
Change/Reset a user's password for authdb.
Password will be hashed and saved.
:param userid:
the user.id to reset the password
:param password:
The clear text password to reset and save hashed on the db
"""
user = self.get_user_by_id(userid)
user.password = generate_password_hash(password)
self.update_user(user)
def update_user_auth_stat(self, user, success=True):
"""Update user authentication stats.
This is done upon successful/unsuccessful authentication attempts.
:param user:
The identified (but possibly not successfully authenticated) user
model
:param success:
Defaults to true, if true increments login_count, updates
last_login, and resets fail_login_count to 0, if false increments
fail_login_count on user model.
"""
if not user.login_count:
user.login_count = 0
if not user.fail_login_count:
user.fail_login_count = 0
if success:
user.login_count += 1
user.last_login = datetime.datetime.now()
user.fail_login_count = 0
else:
user.fail_login_count += 1
self.update_user(user)
def _rotate_session_id(self):
"""Rotate the session ID.
We need to do this upon successful authentication when using the
database session backend.
"""
if conf.get("webserver", "SESSION_BACKEND") == "database":
session.sid = str(uuid4())
def auth_user_db(self, username, password):
"""
Method for authenticating user, auth db style.
:param username:
The username or registered email address
:param password:
The password, will be tested against hashed password on db
"""
if username is None or username == "":
return None
user = self.find_user(username=username)
if user is None:
user = self.find_user(email=username)
if user is None or (not user.is_active):
# Run a dummy hash check so the response time is similar whether or not the user exists (avoids user enumeration via timing)
check_password_hash(
"pbkdf2:sha256:150000$Z3t6fmj2$22da622d94a1f8118"
"c0976a03d2f18f680bfff877c9a965db9eedc51bc0be87c",
"password",
)
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
elif check_password_hash(user.password, password):
self._rotate_session_id()
self.update_user_auth_stat(user, True)
return user
else:
self.update_user_auth_stat(user, False)
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
def _search_ldap(self, ldap, con, username):
"""
Searches LDAP for user.
:param ldap: The ldap module reference
:param con: The ldap connection
:param username: username to match with AUTH_LDAP_UID_FIELD
:return: ldap object array
"""
# always check AUTH_LDAP_SEARCH is set before calling this method
assert self.auth_ldap_search, "AUTH_LDAP_SEARCH must be set"
# build the filter string for the LDAP search
if self.auth_ldap_search_filter:
filter_str = f"(&{self.auth_ldap_search_filter}({self.auth_ldap_uid_field}={username}))"
else:
filter_str = f"({self.auth_ldap_uid_field}={username})"
# build what fields to request in the LDAP search
request_fields = [
self.auth_ldap_firstname_field,
self.auth_ldap_lastname_field,
self.auth_ldap_email_field,
]
if len(self.auth_roles_mapping) > 0:
request_fields.append(self.auth_ldap_group_field)
# perform the LDAP search
log.debug(
"LDAP search for %r with fields %s in scope %r", filter_str, request_fields, self.auth_ldap_search
)
raw_search_result = con.search_s(
self.auth_ldap_search, ldap.SCOPE_SUBTREE, filter_str, request_fields
)
log.debug("LDAP search returned: %s", raw_search_result)
# Remove any search referrals from results
search_result = [
(dn, attrs) for dn, attrs in raw_search_result if dn is not None and isinstance(attrs, dict)
]
# only continue if 0 or 1 results were returned
if len(search_result) > 1:
log.error(
"LDAP search for %r in scope '%a' returned multiple results",
self.auth_ldap_search,
filter_str,
)
return None, None
try:
# extract the DN
user_dn = search_result[0][0]
# extract the other attributes
user_info = search_result[0][1]
# return
return user_dn, user_info
except (IndexError, NameError):
return None, None
def _ldap_calculate_user_roles(self, user_attributes: dict[str, list[bytes]]) -> list[str]:
user_role_objects = set()
# apply AUTH_ROLES_MAPPING
if len(self.auth_roles_mapping) > 0:
user_role_keys = self.ldap_extract_list(user_attributes, self.auth_ldap_group_field)
user_role_objects.update(self.get_roles_from_keys(user_role_keys))
# apply AUTH_USER_REGISTRATION
if self.auth_user_registration:
registration_role_name = self.auth_user_registration_role
# lookup registration role in flask db
fab_role = self.find_role(registration_role_name)
if fab_role:
user_role_objects.add(fab_role)
else:
log.warning("Can't find AUTH_USER_REGISTRATION role: %s", registration_role_name)
return list(user_role_objects)
def _ldap_bind_indirect(self, ldap, con) -> None:
"""
Attempt to bind to LDAP using the AUTH_LDAP_BIND_USER.
:param ldap: The ldap module reference
:param con: The ldap connection
"""
# always check AUTH_LDAP_BIND_USER is set before calling this method
assert self.auth_ldap_bind_user, "AUTH_LDAP_BIND_USER must be set"
try:
log.debug("LDAP bind indirect TRY with username: %r", self.auth_ldap_bind_user)
con.simple_bind_s(self.auth_ldap_bind_user, self.auth_ldap_bind_password)
log.debug("LDAP bind indirect SUCCESS with username: %r", self.auth_ldap_bind_user)
except ldap.INVALID_CREDENTIALS as ex:
log.error("AUTH_LDAP_BIND_USER and AUTH_LDAP_BIND_PASSWORD are not valid LDAP bind credentials")
raise ex
@staticmethod
def _ldap_bind(ldap, con, dn: str, password: str) -> bool:
"""Validates/binds the provided dn/password with the LDAP sever."""
try:
log.debug("LDAP bind TRY with username: %r", dn)
con.simple_bind_s(dn, password)
log.debug("LDAP bind SUCCESS with username: %r", dn)
return True
except ldap.INVALID_CREDENTIALS:
return False
@staticmethod
def ldap_extract(ldap_dict: dict[str, list[bytes]], field_name: str, fallback: str) -> str:
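# e.g. ldap_extract({"mail": [b"jane@example.com"]}, "mail", "") -> "jane@example.com" (illustrative values)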
raw_value = ldap_dict.get(field_name, [b""])
# decode - if empty string, default to fallback, otherwise take first element
return raw_value[0].decode("utf-8") or fallback
@staticmethod
def ldap_extract_list(ldap_dict: dict[str, list[bytes]], field_name: str) -> list[str]:
raw_list = ldap_dict.get(field_name, [])
# decode - removing empty strings
return [x.decode("utf-8") for x in raw_list if x.decode("utf-8")]
def auth_user_ldap(self, username, password):
"""
Method for authenticating user with LDAP.
NOTE: this depends on python-ldap module
:param username: the username
:param password: the password
"""
# If no username is provided, go away
if (username is None) or username == "":
return None
# Search the DB for this user
user = self.find_user(username=username)
# If user is not active, go away
if user and (not user.is_active):
return None
# If user is not registered, and not self-registration, go away
if (not user) and (not self.auth_user_registration):
return None
# Ensure python-ldap is installed
try:
import ldap
except ImportError:
log.error("python-ldap library is not installed")
return None
try:
# LDAP certificate settings
if self.auth_ldap_tls_cacertdir:
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, self.auth_ldap_tls_cacertdir)
if self.auth_ldap_tls_cacertfile:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.auth_ldap_tls_cacertfile)
if self.auth_ldap_tls_certfile:
ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.auth_ldap_tls_certfile)
if self.auth_ldap_tls_keyfile:
ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.auth_ldap_tls_keyfile)
if self.auth_ldap_allow_self_signed:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
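# OPT_X_TLS_NEWCTX makes libldap build a fresh TLS context so the options set above take effect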
ldap.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
elif self.auth_ldap_tls_demand:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
ldap.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
# Initialise LDAP connection
con = ldap.initialize(self.auth_ldap_server)
con.set_option(ldap.OPT_REFERRALS, 0)
if self.auth_ldap_use_tls:
try:
con.start_tls_s()
except Exception:
log.error(LOGMSG_ERR_SEC_AUTH_LDAP_TLS.format(self.auth_ldap_server))
return None
# Define variables, so we can check if they are set in later steps
user_dn = None
user_attributes = {}
# Flow 1 - (Indirect Search Bind):
# - in this flow, special bind credentials are used to perform the
# LDAP search
# - in this flow, AUTH_LDAP_SEARCH must be set
if self.auth_ldap_bind_user:
# Bind with AUTH_LDAP_BIND_USER/AUTH_LDAP_BIND_PASSWORD
# (authorizes for LDAP search)
self._ldap_bind_indirect(ldap, con)
# Search for `username`
# - returns the `user_dn` needed for binding to validate credentials
# - returns the `user_attributes` needed for
# AUTH_USER_REGISTRATION/AUTH_ROLES_SYNC_AT_LOGIN
if self.auth_ldap_search:
user_dn, user_attributes = self._search_ldap(ldap, con, username)
else:
log.error("AUTH_LDAP_SEARCH must be set when using AUTH_LDAP_BIND_USER")
return None
# If search failed, go away
if user_dn is None:
log.info(LOGMSG_WAR_SEC_NOLDAP_OBJ.format(username))
return None
# Bind with user_dn/password (validates credentials)
if not self._ldap_bind(ldap, con, user_dn, password):
if user:
self.update_user_auth_stat(user, False)
# Invalid credentials, go away
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
# Flow 2 - (Direct Search Bind):
# - in this flow, the credentials provided by the end-user are used
# to perform the LDAP search
# - in this flow, we only search LDAP if AUTH_LDAP_SEARCH is set
# - features like AUTH_USER_REGISTRATION & AUTH_ROLES_SYNC_AT_LOGIN
# will only work if AUTH_LDAP_SEARCH is set
else:
# Copy the provided username (so we can apply formatters)
bind_username = username
# update `bind_username` by applying AUTH_LDAP_APPEND_DOMAIN
# - for Microsoft AD, which allows binding with userPrincipalName
if self.auth_ldap_append_domain:
bind_username = bind_username + "@" + self.auth_ldap_append_domain
# Update `bind_username` by applying AUTH_LDAP_USERNAME_FORMAT
# - for transforming the username into a DN,
# for example: "uid=%s,ou=example,o=test"
if self.auth_ldap_username_format:
bind_username = self.auth_ldap_username_format % bind_username
# Bind with bind_username/password
# (validates credentials & authorizes for LDAP search)
if not self._ldap_bind(ldap, con, bind_username, password):
if user:
self.update_user_auth_stat(user, False)
# Invalid credentials, go away
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(bind_username))
return None
# Search for `username` (if AUTH_LDAP_SEARCH is set)
# - returns the `user_attributes`
# needed for AUTH_USER_REGISTRATION/AUTH_ROLES_SYNC_AT_LOGIN
# - we search on `username` not `bind_username`,
# because AUTH_LDAP_APPEND_DOMAIN and AUTH_LDAP_USERNAME_FORMAT
# would result in an invalid search filter
if self.auth_ldap_search:
user_dn, user_attributes = self._search_ldap(ldap, con, username)
# If search failed, go away
if user_dn is None:
log.info(LOGMSG_WAR_SEC_NOLDAP_OBJ.format(username))
return None
# Sync the user's roles
if user and user_attributes and self.auth_roles_sync_at_login:
user.roles = self._ldap_calculate_user_roles(user_attributes)
log.debug("Calculated new roles for user=%r as: %s", user_dn, user.roles)
# If the user is new, register them
if (not user) and user_attributes and self.auth_user_registration:
user = self.add_user(
username=username,
first_name=self.ldap_extract(user_attributes, self.auth_ldap_firstname_field, ""),
last_name=self.ldap_extract(user_attributes, self.auth_ldap_lastname_field, ""),
email=self.ldap_extract(
user_attributes,
self.auth_ldap_email_field,
f"{username}@email.notfound",
),
role=self._ldap_calculate_user_roles(user_attributes),
)
log.debug("New user registered: %s", user)
# If user registration failed, go away
if not user:
log.info(LOGMSG_ERR_SEC_ADD_REGISTER_USER.format(username))
return None
# LOGIN SUCCESS (only if user is now registered)
if user:
self._rotate_session_id()
self.update_user_auth_stat(user)
return user
else:
return None
except ldap.LDAPError as e:
msg = None
if isinstance(e, dict):
msg = getattr(e, "message", None)
if (msg is not None) and ("desc" in msg):
log.error(LOGMSG_ERR_SEC_AUTH_LDAP.format(e.message["desc"]))
return None
else:
log.error(e)
return None
def auth_user_oid(self, email):
"""
Openid user Authentication.
:param email: user's email to authenticate
"""
user = self.find_user(email=email)
if user is None or (not user.is_active):
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(email))
return None
else:
self._rotate_session_id()
self.update_user_auth_stat(user)
return user
def auth_user_remote_user(self, username):
"""
REMOTE_USER user Authentication.
:param username: user's username for remote auth
"""
user = self.find_user(username=username)
# User does not exist, create one if auto user registration.
if user is None and self.auth_user_registration:
user = self.add_user(
# All we have is REMOTE_USER, so we set
# the other fields to blank.
username=username,
first_name=username,
last_name="-",
email=username + "@email.notfound",
role=self.find_role(self.auth_user_registration_role),
)
# If user does not exist on the DB and not auto user registration,
# or user is inactive, go away.
elif user is None or (not user.is_active):
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
self._rotate_session_id()
self.update_user_auth_stat(user)
return user
def _oauth_calculate_user_roles(self, userinfo) -> list[str]:
user_role_objects = set()
# apply AUTH_ROLES_MAPPING
if len(self.auth_roles_mapping) > 0:
user_role_keys = userinfo.get("role_keys", [])
user_role_objects.update(self.get_roles_from_keys(user_role_keys))
# apply AUTH_USER_REGISTRATION_ROLE
if self.auth_user_registration:
registration_role_name = self.auth_user_registration_role
# if AUTH_USER_REGISTRATION_ROLE_JMESPATH is set,
# use it for the registration role
if self.auth_user_registration_role_jmespath:
import jmespath
registration_role_name = jmespath.search(self.auth_user_registration_role_jmespath, userinfo)
# lookup registration role in flask db
fab_role = self.find_role(registration_role_name)
if fab_role:
user_role_objects.add(fab_role)
else:
log.warning("Can't find AUTH_USER_REGISTRATION role: %s", registration_role_name)
return list(user_role_objects)
def auth_user_oauth(self, userinfo):
"""
Method for authenticating user with OAuth.
:param userinfo: dict with user information
(keys are the same as User model columns)
"""
# extract the username from `userinfo`
if "username" in userinfo:
username = userinfo["username"]
elif "email" in userinfo:
username = userinfo["email"]
else:
log.error("OAUTH userinfo does not have username or email %s", userinfo)
return None
# If username is empty, go away
if (username is None) or username == "":
return None
# Search the DB for this user
user = self.find_user(username=username)
# If user is not active, go away
if user and (not user.is_active):
return None
# If user is not registered, and not self-registration, go away
if (not user) and (not self.auth_user_registration):
return None
# Sync the user's roles
if user and self.auth_roles_sync_at_login:
user.roles = self._oauth_calculate_user_roles(userinfo)
log.debug("Calculated new roles for user=%r as: %s", username, user.roles)
# If the user is new, register them
if (not user) and self.auth_user_registration:
user = self.add_user(
username=username,
first_name=userinfo.get("first_name", ""),
last_name=userinfo.get("last_name", ""),
email=userinfo.get("email", "") or f"{username}@email.notfound",
role=self._oauth_calculate_user_roles(userinfo),
)
log.debug("New user registered: %s", user)
# If user registration failed, go away
if not user:
log.error("Error creating a new OAuth user %s", username)
return None
# LOGIN SUCCESS (only if user is now registered)
if user:
self._rotate_session_id()
self.update_user_auth_stat(user)
return user
else:
return None
def _has_access_builtin_roles(self, role, action_name: str, resource_name: str) -> bool:
"""Checks permission on builtin role."""
perms = self.builtin_roles.get(role.name, [])
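# Each builtin-role entry is a list of (resource regex, action regex) pairs,
# e.g. FAB_ROLES = {"ReadOnly": [(".*", "can_read")]} (illustrative config, not a shipped default)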
for _resource_name, _action_name in perms:
if re2.match(_resource_name, resource_name) and re2.match(_action_name, action_name):
return True
return False
def _get_user_permission_resources(
self, user: User | None, action_name: str, resource_names: list[str] | None = None
) -> set[str]:
"""Get resource names with a certain action name that a user has access to.
Mainly used to fetch all menu permissions in a single db call; this also
checks public permissions and builtin roles.
"""
if not resource_names:
resource_names = []
db_role_ids = []
if user is None:
# include public role
roles = [self.get_public_role()]
else:
roles = user.roles
# First check against builtin (statically configured) roles
# because no database query is needed
result = set()
for role in roles:
if role.name in self.builtin_roles:
for resource_name in resource_names:
if self._has_access_builtin_roles(role, action_name, resource_name):
result.add(resource_name)
else:
db_role_ids.append(role.id)
# Then check against database-stored roles
role_resource_names = [
perm.resource.name for perm in self.filter_roles_by_perm_with_action(action_name, db_role_ids)
]
result.update(role_resource_names)
return result
def get_user_menu_access(self, menu_names: list[str] | None = None) -> set[str]:
if auth_manager.is_logged_in():
return self._get_user_permission_resources(g.user, "menu_access", resource_names=menu_names)
elif current_user_jwt:
return self._get_user_permission_resources(
# the current_user_jwt is a lazy proxy, so we need to ignore type checking
current_user_jwt, # type: ignore[arg-type]
"menu_access",
resource_names=menu_names,
)
else:
return self._get_user_permission_resources(None, "menu_access", resource_names=menu_names)
def add_limit_view(self, baseview):
if not baseview.limits:
return
for limit in baseview.limits:
self.limiter.limit(
limit_value=limit.limit_value,
key_func=limit.key_func,
per_method=limit.per_method,
methods=limit.methods,
error_message=limit.error_message,
exempt_when=limit.exempt_when,
override_defaults=limit.override_defaults,
deduct_when=limit.deduct_when,
on_breach=limit.on_breach,
cost=limit.cost,
)(baseview.blueprint)
def add_permissions_view(self, base_action_names, resource_name): # Keep name for compatibility with FAB.
"""
Adds an action on a resource to the backend.
:param base_action_names:
list of permissions from view (all exposed methods):
'can_add','can_edit' etc...
:param resource_name:
name of the resource to add
"""
resource = self.create_resource(resource_name)
perms = self.get_resource_permissions(resource)
if not perms:
# No permissions yet on this view
for action_name in base_action_names:
action = self.create_permission(action_name, resource_name)
if self.auth_role_admin not in self.builtin_roles:
admin_role = self.find_role(self.auth_role_admin)
self.add_permission_to_role(admin_role, action)
else:
# Permissions on this view exist but....
admin_role = self.find_role(self.auth_role_admin)
for action_name in base_action_names:
# Check if base view permissions exist
if not self.perms_include_action(perms, action_name):
action = self.create_permission(action_name, resource_name)
if self.auth_role_admin not in self.builtin_roles:
self.add_permission_to_role(admin_role, action)
for perm in perms:
if perm.action is None:
# Skip this perm, it has a null permission
continue
if perm.action.name not in base_action_names:
# perm to delete
roles = self.get_all_roles()
# del permission from all roles
for role in roles:
# TODO: An action can't be removed from a role.
# This is a bug in FAB. It has been reported.
self.remove_permission_from_role(role, perm)
self.delete_permission(perm.action.name, resource_name)
elif self.auth_role_admin not in self.builtin_roles and perm not in admin_role.permissions:
# Role Admin must have all permissions
self.add_permission_to_role(admin_role, perm)
def add_permissions_menu(self, resource_name):
"""
Adds a ``menu_access`` permission on the given resource.
:param resource_name:
The resource name
"""
self.create_resource(resource_name)
perm = self.get_permission("menu_access", resource_name)
if not perm:
perm = self.create_permission("menu_access", resource_name)
if self.auth_role_admin not in self.builtin_roles:
role_admin = self.find_role(self.auth_role_admin)
self.add_permission_to_role(role_admin, perm)
def security_cleanup(self, baseviews, menus):
"""
Cleans up all unused permissions from the database.
:param baseviews: A list of BaseView classes
:param menus: Menu class
"""
resources = self.get_all_resources()
roles = self.get_all_roles()
for resource in resources:
found = False
for baseview in baseviews:
if resource.name == baseview.class_permission_name:
found = True
break
if menus.find(resource.name):
found = True
if not found:
permissions = self.get_resource_permissions(resource)
for permission in permissions:
for role in roles:
self.remove_permission_from_role(role, permission)
self.delete_permission(permission.action.name, resource.name)
self.delete_resource(resource.name)
def find_register_user(self, registration_hash):
"""Generic function to return user registration."""
raise NotImplementedError
def add_register_user(self, username, first_name, last_name, email, password="", hashed_password=""):
"""Generic function to add user registration."""
raise NotImplementedError
def del_register_user(self, register_user):
"""Generic function to delete user registration."""
raise NotImplementedError
def get_user_by_id(self, pk):
"""Generic function to return user by it's id (pk)."""
raise NotImplementedError
def find_user(self, username=None, email=None):
"""Generic function find a user by it's username or email."""
raise NotImplementedError
def get_all_users(self):
"""Generic function that returns all existing users."""
raise NotImplementedError
def get_role_permissions_from_db(self, role_id: int) -> list[Permission]:
"""Get all DB permissions from a role id."""
raise NotImplementedError
def add_user(self, username, first_name, last_name, email, role, password=""):
"""Generic function to create user."""
raise NotImplementedError
def update_user(self, user):
"""
Generic function to update user.
:param user: User model to update to database
"""
raise NotImplementedError
def count_users(self):
"""Generic function to count the existing users."""
raise NotImplementedError
def find_role(self, name):
raise NotImplementedError
def add_role(self, name):
raise NotImplementedError
def update_role(self, role_id, name):
raise NotImplementedError
def get_all_roles(self):
raise NotImplementedError
def get_public_role(self):
"""Returns all permissions from public role."""
raise NotImplementedError
def get_action(self, name: str) -> Action:
"""
Gets an existing action record.
:param name: name
:return: Action record, if it exists
"""
raise NotImplementedError
def filter_roles_by_perm_with_action(self, permission_name: str, role_ids: list[int]):
raise NotImplementedError
def permission_exists_in_one_or_more_roles(
self, resource_name: str, action_name: str, role_ids: list[int]
) -> bool:
"""Finds and returns permission views for a group of roles."""
raise NotImplementedError
def create_action(self, name):
"""
Adds a permission to the backend, model permission.
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
raise NotImplementedError
def delete_action(self, name: str) -> bool:
"""
Deletes a permission action.
:param name: Name of action to delete (e.g. can_read).
:return: Whether or not delete was successful.
"""
raise NotImplementedError
"""
----------------------
PRIMITIVES VIEW MENU
----------------------
"""
def get_resource(self, name: str):
"""
Returns a resource record by name, if it exists.
:param name: Name of resource
"""
raise NotImplementedError
def get_all_resources(self) -> list[Resource]:
"""
Gets all existing resource records.
:return: List of all resources
"""
raise NotImplementedError
def create_resource(self, name):
"""
Create a resource with the given name.
:param name: The name of the resource to create.
"""
raise NotImplementedError
def delete_resource(self, name):
"""
Deletes a Resource from the backend.
:param name:
name of the Resource
"""
raise NotImplementedError
"""
----------------------
PERMISSION VIEW MENU
----------------------
"""
def get_permission(self, action_name: str, resource_name: str) -> Permission | None:
"""
Gets a permission made with the given action->resource pair, if the permission already exists.
:param action_name: Name of action
:param resource_name: Name of resource
:return: The existing permission
"""
raise NotImplementedError
def get_resource_permissions(self, resource) -> Permission:
"""
Retrieve permission pairs associated with a specific resource object.
:param resource: Object representing a single resource.
:return: Permission objects representing resource->action pairs
"""
raise NotImplementedError
def create_permission(self, action_name: str, resource_name: str) -> Permission | None:
"""
Creates a permission linking an action and resource.
:param action_name: Name of existing action
:param resource_name: Name of existing resource
:return: The created Permission
"""
raise NotImplementedError
def delete_permission(self, action_name: str, resource_name: str) -> None:
"""
Deletes the permission linking an action->resource pair. Doesn't delete the
underlying action or resource.
:param action_name: Name of existing action
:param resource_name: Name of existing resource
:return: None
"""
raise NotImplementedError
def perms_include_action(self, perms, action_name):
raise NotImplementedError
def add_permission_to_role(self, role, permission) -> None:
"""
Add an existing permission pair to a role.
:param role: The role about to get a new permission.
:param permission: The permission pair to add to a role.
:return: None
"""
raise NotImplementedError
def remove_permission_from_role(self, role, permission) -> None:
"""
Remove a permission pair from a role.
:param role: User role containing permissions.
:param permission: Object representing resource-> action pair
"""
raise NotImplementedError
def load_user(self, user_id):
"""Load user by ID."""
return self.get_user_by_id(int(user_id))
def load_user_jwt(self, _jwt_header, jwt_data):
identity = jwt_data["sub"]
user = self.load_user(identity)
# Set flask g.user to JWT user, we can't do it on before request
g.user = user
return user
@staticmethod
def before_request():
"""Hook runs before request."""
g.user = current_user
| 66,720 | 37.082763 | 110 | py |
airflow | airflow-main/airflow/www/fab_security/sqla/models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
# This product contains a modified portion of 'Flask App Builder' developed by Daniel Vaz Gaspar.
# (https://github.com/dpgaspar/Flask-AppBuilder).
# Copyright 2013, Daniel Vaz Gaspar
from typing import TYPE_CHECKING
from flask import current_app, g
from flask_appbuilder.models.sqla import Model
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Table,
UniqueConstraint,
event,
func,
)
from sqlalchemy.orm import backref, declared_attr, relationship
from airflow.models.base import Base
"""
Compatibility note: The models in this file are duplicated from Flask AppBuilder.
"""
# Use airflow metadata to create the tables
Model.metadata = Base.metadata
if TYPE_CHECKING:
try:
from sqlalchemy import Identity
except Exception:
Identity = None
class Action(Model):
"""Represents permission actions such as `can_read`."""
__tablename__ = "ab_permission"
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
def __repr__(self):
return self.name
class Resource(Model):
"""Represents permission object such as `User` or `Dag`."""
__tablename__ = "ab_view_menu"
id = Column(Integer, primary_key=True)
name = Column(String(250), unique=True, nullable=False)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
def __ne__(self, other):
return self.name != other.name
def __repr__(self):
return self.name
assoc_permission_role = Table(
"ab_permission_view_role",
Model.metadata,
Column("id", Integer, primary_key=True),
Column("permission_view_id", Integer, ForeignKey("ab_permission_view.id")),
Column("role_id", Integer, ForeignKey("ab_role.id")),
UniqueConstraint("permission_view_id", "role_id"),
)
class Role(Model):
"""Represents a user role to which permissions can be assigned."""
__tablename__ = "ab_role"
id = Column(Integer, primary_key=True)
name = Column(String(64), unique=True, nullable=False)
permissions = relationship("Permission", secondary=assoc_permission_role, backref="role", lazy="joined")
def __repr__(self):
return self.name
class Permission(Model):
"""Permission pair comprised of an Action + Resource combo."""
__tablename__ = "ab_permission_view"
__table_args__ = (UniqueConstraint("permission_id", "view_menu_id"),)
id = Column(Integer, primary_key=True)
action_id = Column("permission_id", Integer, ForeignKey("ab_permission.id"))
action = relationship(
"Action",
uselist=False,
lazy="joined",
)
resource_id = Column("view_menu_id", Integer, ForeignKey("ab_view_menu.id"))
resource = relationship(
"Resource",
uselist=False,
lazy="joined",
)
def __repr__(self):
return str(self.action).replace("_", " ") + " on " + str(self.resource)
assoc_user_role = Table(
"ab_user_role",
Model.metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("role_id", Integer, ForeignKey("ab_role.id")),
UniqueConstraint("user_id", "role_id"),
)
class User(Model):
"""Represents an Airflow user which has roles assigned to it."""
__tablename__ = "ab_user"
id = Column(Integer, primary_key=True)
first_name = Column(String(256), nullable=False)
last_name = Column(String(256), nullable=False)
username = Column(
String(512).with_variant(String(512, collation="NOCASE"), "sqlite"), unique=True, nullable=False
)
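# The NOCASE collation variant keeps username uniqueness case-insensitive on SQLite;
# PostgreSQL relies on the lower(username) index added below.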
password = Column(String(256))
active = Column(Boolean)
email = Column(String(512), unique=True, nullable=False)
last_login = Column(DateTime)
login_count = Column(Integer)
fail_login_count = Column(Integer)
roles = relationship("Role", secondary=assoc_user_role, backref="user", lazy="selectin")
created_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
changed_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
@declared_attr
def created_by_fk(self):
return Column(Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True)
@declared_attr
def changed_by_fk(self):
return Column(Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True)
created_by = relationship(
"User",
backref=backref("created", uselist=True),
remote_side=[id],
primaryjoin="User.created_by_fk == User.id",
uselist=False,
)
changed_by = relationship(
"User",
backref=backref("changed", uselist=True),
remote_side=[id],
primaryjoin="User.changed_by_fk == User.id",
uselist=False,
)
@classmethod
def get_user_id(cls):
try:
return g.user.id
except Exception:
return None
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return self.active
@property
def is_anonymous(self):
return False
@property
def perms(self):
if not self._perms:
# Using the ORM here is _slow_ (Creating lots of objects to then throw them away) since this is in
# the path for every request. Avoid it if we can!
if current_app:
sm = current_app.appbuilder.sm
self._perms: set[tuple[str, str]] = set(
sm.get_session.query(sm.action_model.name, sm.resource_model.name)
.join(sm.permission_model.action)
.join(sm.permission_model.resource)
.join(sm.permission_model.role)
.filter(sm.role_model.user.contains(self))
.all()
)
else:
self._perms = {
(perm.action.name, perm.resource.name) for role in self.roles for perm in role.permissions
}
return self._perms
def get_id(self):
return self.id
def get_full_name(self):
return f"{self.first_name} {self.last_name}"
def __repr__(self):
return self.get_full_name()
_perms = None
class RegisterUser(Model):
"""Represents a user registration."""
__tablename__ = "ab_register_user"
id = Column(Integer, primary_key=True)
first_name = Column(String(256), nullable=False)
last_name = Column(String(256), nullable=False)
username = Column(
String(512).with_variant(String(512, collation="NOCASE"), "sqlite"), unique=True, nullable=False
)
password = Column(String(256))
email = Column(String(512), nullable=False)
registration_date = Column(DateTime, default=datetime.datetime.now, nullable=True)
registration_hash = Column(String(256))
@event.listens_for(User.__table__, "before_create")
def add_index_on_ab_user_username_postgres(table, conn, **kw):
if conn.dialect.name != "postgresql":
return
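# A unique functional index on lower(username) enforces case-insensitive uniqueness on PostgreSQL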
table.indexes.add(Index("idx_ab_user_username", func.lower(table.c.username), unique=True))
@event.listens_for(RegisterUser.__table__, "before_create")
def add_index_on_ab_register_user_username_postgres(table, conn, **kw):
if conn.dialect.name != "postgresql":
return
table.indexes.add(Index("idx_ab_register_user_username", func.lower(table.c.username), unique=True))
| 8,364 | 30.566038 | 110 | py |
airflow | airflow-main/airflow/www/fab_security/sqla/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/www/fab_security/sqla/manager.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import uuid
from flask_appbuilder import const as c
from flask_appbuilder.models.sqla import Base
from flask_appbuilder.models.sqla.interface import SQLAInterface
from sqlalchemy import and_, func, inspect, literal
from sqlalchemy.orm.exc import MultipleResultsFound
from werkzeug.security import generate_password_hash
from airflow.www.fab_security.manager import BaseSecurityManager
from airflow.www.fab_security.sqla.models import (
Action,
Permission,
RegisterUser,
Resource,
Role,
User,
assoc_permission_role,
)
log = logging.getLogger(__name__)
class SecurityManager(BaseSecurityManager):
"""
Responsible for authentication, registering security views,
and role and permission auto management.
If you want to change anything, just inherit and override, then
pass your own security manager to AppBuilder.
"""
user_model = User
""" Override to set your own User Model """
role_model = Role
""" Override to set your own Role Model """
action_model = Action
resource_model = Resource
permission_model = Permission
registeruser_model = RegisterUser
def __init__(self, appbuilder):
"""
Class constructor.
:param appbuilder: F.A.B AppBuilder main object
"""
super().__init__(appbuilder)
user_datamodel = SQLAInterface(self.user_model)
if self.auth_type == c.AUTH_DB:
self.userdbmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_LDAP:
self.userldapmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OID:
self.useroidmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OAUTH:
self.useroauthmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_REMOTE_USER:
self.userremoteusermodelview.datamodel = user_datamodel
if self.userstatschartview:
self.userstatschartview.datamodel = user_datamodel
if self.auth_user_registration:
self.registerusermodelview.datamodel = SQLAInterface(self.registeruser_model)
self.rolemodelview.datamodel = SQLAInterface(self.role_model)
self.actionmodelview.datamodel = SQLAInterface(self.action_model)
self.resourcemodelview.datamodel = SQLAInterface(self.resource_model)
self.permissionmodelview.datamodel = SQLAInterface(self.permission_model)
self.create_db()
@property
def get_session(self):
return self.appbuilder.get_session
def register_views(self):
super().register_views()
def create_db(self):
try:
engine = self.get_session.get_bind(mapper=None, clause=None)
inspector = inspect(engine)
if "ab_user" not in inspector.get_table_names():
log.info(c.LOGMSG_INF_SEC_NO_DB)
Base.metadata.create_all(engine)
log.info(c.LOGMSG_INF_SEC_ADD_DB)
super().create_db()
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_CREATE_DB.format(str(e)))
exit(1)
def find_register_user(self, registration_hash):
return (
self.get_session.query(self.registeruser_model)
.filter(self.registeruser_model.registration_hash == registration_hash)
.scalar()
)
def add_register_user(self, username, first_name, last_name, email, password="", hashed_password=""):
"""
Add a registration request for the user.
:rtype: RegisterUser
"""
register_user = self.registeruser_model()
register_user.username = username
register_user.email = email
register_user.first_name = first_name
register_user.last_name = last_name
if hashed_password:
register_user.password = hashed_password
else:
register_user.password = generate_password_hash(password)
register_user.registration_hash = str(uuid.uuid1())
try:
self.get_session.add(register_user)
self.get_session.commit()
return register_user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_REGISTER_USER.format(str(e)))
self.appbuilder.get_session.rollback()
return None
def del_register_user(self, register_user):
"""
Deletes registration object from database.
:param register_user: RegisterUser object to delete
"""
try:
self.get_session.delete(register_user)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e)))
self.get_session.rollback()
return False
def find_user(self, username=None, email=None):
"""Finds user by username or email."""
if username:
try:
if self.auth_username_ci:
return (
self.get_session.query(self.user_model)
.filter(func.lower(self.user_model.username) == func.lower(username))
.one_or_none()
)
else:
return (
self.get_session.query(self.user_model)
.filter(self.user_model.username == username)
.one_or_none()
)
except MultipleResultsFound:
log.error("Multiple results found for user %s", username)
return None
elif email:
try:
return self.get_session.query(self.user_model).filter_by(email=email).one_or_none()
except MultipleResultsFound:
log.error("Multiple results found for user with email %s", email)
return None
def get_all_users(self):
return self.get_session.query(self.user_model).all()
def add_user(
self,
username,
first_name,
last_name,
email,
role,
password="",
hashed_password="",
):
"""Generic function to create user."""
try:
user = self.user_model()
user.first_name = first_name
user.last_name = last_name
user.username = username
user.email = email
user.active = True
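            # ``role`` may be passed either as a single Role or as a list of Roles.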
user.roles = role if isinstance(role, list) else [role]
if hashed_password:
user.password = hashed_password
else:
user.password = generate_password_hash(password)
self.get_session.add(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_USER.format(username))
return user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_USER.format(str(e)))
self.get_session.rollback()
return False
def count_users(self):
return self.get_session.query(func.count(self.user_model.id)).scalar()
def update_user(self, user):
try:
self.get_session.merge(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_UPD_USER.format(user))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_UPD_USER.format(str(e)))
self.get_session.rollback()
return False
def get_user_by_id(self, pk):
return self.get_session.get(self.user_model, pk)
def add_role(self, name: str) -> Role:
role = self.find_role(name)
if role is None:
try:
role = self.role_model()
role.name = name
self.get_session.add(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_ROLE.format(name))
return role
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_ROLE.format(str(e)))
self.get_session.rollback()
return role
def update_role(self, role_id, name: str) -> Role | None:
role = self.get_session.get(self.role_model, role_id)
if not role:
return None
try:
role.name = name
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_UPD_ROLE.format(role))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_UPD_ROLE.format(str(e)))
self.get_session.rollback()
return None
return role
def find_role(self, name):
return self.get_session.query(self.role_model).filter_by(name=name).one_or_none()
def get_all_roles(self):
return self.get_session.query(self.role_model).all()
def get_public_role(self):
return self.get_session.query(self.role_model).filter_by(name=self.auth_role_public).one_or_none()
def get_action(self, name: str) -> Action:
"""
Gets an existing action record.
:param name: name
:return: Action record, if it exists
"""
return self.get_session.query(self.action_model).filter_by(name=name).one_or_none()
def permission_exists_in_one_or_more_roles(
self, resource_name: str, action_name: str, role_ids: list[int]
) -> bool:
"""
        Efficiently check whether a permission exists on any of the given roles.
        This is used by ``has_access``.
        :param resource_name: Name of the resource (view) to check
        :param action_name: Name of the action (permission) to check
        :param role_ids: A list of role ids
        :return: Boolean
"""
q = (
self.appbuilder.get_session.query(self.permission_model)
.join(
assoc_permission_role,
and_(self.permission_model.id == assoc_permission_role.c.permission_view_id),
)
.join(self.role_model)
.join(self.action_model)
.join(self.resource_model)
.filter(
self.resource_model.name == resource_name,
self.action_model.name == action_name,
self.role_model.id.in_(role_ids),
)
.exists()
)
# Special case for MSSQL/Oracle (works on PG and MySQL > 8)
if self.appbuilder.get_session.bind.dialect.name in ("mssql", "oracle"):
return self.appbuilder.get_session.query(literal(True)).filter(q).scalar()
return self.appbuilder.get_session.query(q).scalar()
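    # Usage sketch (``sm`` and ``user`` are hypothetical SecurityManager/user objects;
    # the resource/action names are illustrative only):
    #   sm.permission_exists_in_one_or_more_roles("DAGs", "can_read", [r.id for r in user.roles])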
def filter_roles_by_perm_with_action(self, action_name: str, role_ids: list[int]):
"""Find roles with permission."""
return (
self.appbuilder.get_session.query(self.permission_model)
.join(
assoc_permission_role,
and_(self.permission_model.id == assoc_permission_role.c.permission_view_id),
)
.join(self.role_model)
.join(self.action_model)
.join(self.resource_model)
.filter(
self.action_model.name == action_name,
self.role_model.id.in_(role_ids),
)
).all()
def create_action(self, name):
"""
        Adds an action record (the ``Action`` model) to the backend.
        :param name:
            name of the action: 'can_add', 'can_edit', etc.
"""
action = self.get_action(name)
if action is None:
try:
action = self.action_model()
action.name = name
self.get_session.add(action)
self.get_session.commit()
return action
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMISSION.format(str(e)))
self.get_session.rollback()
return action
def delete_action(self, name: str) -> bool:
"""
Deletes a permission action.
:param name: Name of action to delete (e.g. can_read).
:return: Whether or not delete was successful.
"""
action = self.get_action(name)
if not action:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERMISSION.format(name))
return False
try:
perms = (
self.get_session.query(self.permission_model)
.filter(self.permission_model.action == action)
.all()
)
if perms:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERM_PVM.format(action, perms))
return False
self.get_session.delete(action)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
return False
def get_resource(self, name: str) -> Resource:
"""
Returns a resource record by name, if it exists.
:param name: Name of resource
:return: Resource record
"""
return self.get_session.query(self.resource_model).filter_by(name=name).one_or_none()
def get_all_resources(self) -> list[Resource]:
"""
Gets all existing resource records.
:return: List of all resources
"""
return self.get_session.query(self.resource_model).all()
def create_resource(self, name) -> Resource:
"""
Create a resource with the given name.
        :param name: The name of the resource to be created.
:return: The FAB resource created.
"""
resource = self.get_resource(name)
if resource is None:
try:
resource = self.resource_model()
resource.name = name
self.get_session.add(resource)
self.get_session.commit()
return resource
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_VIEWMENU.format(str(e)))
self.get_session.rollback()
return resource
def delete_resource(self, name: str) -> bool:
"""
Deletes a Resource from the backend.
:param name:
name of the resource
"""
resource = self.get_resource(name)
if not resource:
log.warning(c.LOGMSG_WAR_SEC_DEL_VIEWMENU.format(name))
return False
try:
perms = (
self.get_session.query(self.permission_model)
.filter(self.permission_model.resource == resource)
.all()
)
if perms:
log.warning(c.LOGMSG_WAR_SEC_DEL_VIEWMENU_PVM.format(resource, perms))
return False
self.get_session.delete(resource)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
return False
"""
----------------------
PERMISSION VIEW MENU
----------------------
"""
def get_permission(
self,
action_name: str,
resource_name: str,
) -> Permission | None:
"""
Gets a permission made with the given action->resource pair, if the permission already exists.
:param action_name: Name of action
:param resource_name: Name of resource
:return: The existing permission
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
if action and resource:
return (
self.get_session.query(self.permission_model)
.filter_by(action=action, resource=resource)
.one_or_none()
)
return None
    def get_resource_permissions(self, resource: Resource) -> list[Permission]:
        """
        Retrieve permission pairs associated with a specific resource object.
        :param resource: Object representing a single resource.
        :return: Permission objects representing the resource->action pairs
        """
return self.get_session.query(self.permission_model).filter_by(resource_id=resource.id).all()
def create_permission(self, action_name, resource_name) -> Permission | None:
"""
Adds a permission on a resource to the backend.
:param action_name:
            name of the action to add: 'can_add', 'can_edit', etc.
:param resource_name:
name of the resource to add
"""
if not (action_name and resource_name):
return None
perm = self.get_permission(action_name, resource_name)
if perm:
return perm
resource = self.create_resource(resource_name)
action = self.create_action(action_name)
perm = self.permission_model()
perm.resource_id, perm.action_id = resource.id, action.id
try:
self.get_session.add(perm)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMVIEW.format(str(perm)))
return perm
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMVIEW.format(str(e)))
self.get_session.rollback()
return None
def delete_permission(self, action_name: str, resource_name: str) -> None:
"""
Deletes the permission linking an action->resource pair. Doesn't delete the
underlying action or resource.
:param action_name: Name of existing action
:param resource_name: Name of existing resource
:return: None
"""
if not (action_name and resource_name):
return
perm = self.get_permission(action_name, resource_name)
if not perm:
return
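        # Refuse to delete a permission that is still assigned to at least one role.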
roles = (
self.get_session.query(self.role_model).filter(self.role_model.permissions.contains(perm)).first()
)
if roles:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERMVIEW.format(resource_name, action_name, roles))
return
try:
# delete permission on resource
self.get_session.delete(perm)
self.get_session.commit()
            # if no other permission references this action, delete the action as well
if not self.get_session.query(self.permission_model).filter_by(action=perm.action).all():
self.delete_action(perm.action.name)
log.info(c.LOGMSG_INF_SEC_DEL_PERMVIEW.format(action_name, resource_name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMVIEW.format(str(e)))
self.get_session.rollback()
def perms_include_action(self, perms, action_name):
for perm in perms:
if perm.action and perm.action.name == action_name:
return True
return False
def add_permission_to_role(self, role: Role, permission: Permission | None) -> None:
"""
Add an existing permission pair to a role.
:param role: The role about to get a new permission.
:param permission: The permission pair to add to a role.
:return: None
"""
if permission and permission not in role.permissions:
try:
role.permissions.append(permission)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(permission), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e)))
self.get_session.rollback()
def remove_permission_from_role(self, role: Role, permission: Permission) -> None:
"""
Remove a permission pair from a role.
:param role: User role containing permissions.
:param permission: Object representing resource-> action pair
"""
if permission in role.permissions:
try:
role.permissions.remove(permission)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(permission), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e)))
self.get_session.rollback()
| 21,377 | 35.668954 | 110 | py |
airflow | airflow-main/airflow/www/extensions/init_cache.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import hashlib
from tempfile import gettempdir
from flask_caching import Cache
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
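# Supported values for [webserver] caching_hash_method, mapped to hashlib constructors.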
HASH_METHOD_MAPPING = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha224": hashlib.sha224,
"sha256": hashlib.sha256,
"sha384": hashlib.sha384,
"sha512": hashlib.sha512,
}
def init_cache(app):
webserver_caching_hash_method = conf.get(
section="webserver", key="CACHING_HASH_METHOD", fallback="md5"
).casefold()
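    # Cache entries are written as files under the system temp directory.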
cache_config = {"CACHE_TYPE": "flask_caching.backends.filesystem", "CACHE_DIR": gettempdir()}
mapped_hash_method = HASH_METHOD_MAPPING.get(webserver_caching_hash_method)
if mapped_hash_method is None:
raise AirflowConfigException(
f"Unsupported webserver caching hash method: `{webserver_caching_hash_method}`."
)
cache_config["CACHE_OPTIONS"] = {"hash_method": mapped_hash_method}
Cache(app=app, config=cache_config)
| 1,819 | 33.339623 | 97 | py |
airflow | airflow-main/airflow/www/extensions/init_robots.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
log = logging.getLogger(__name__)
def init_robots(app):
"""
    Add an ``X-Robots-Tag`` header to keep search engines from indexing Airflow. This mitigates
    some of the risk associated with exposing Airflow to the public internet; however, it does
    not address the real security risks associated with such a deployment.
See also: https://developers.google.com/search/docs/advanced/robots/robots_meta_tag#xrobotstag
"""
def apply_robot_tag(response):
response.headers["X-Robots-Tag"] = "noindex, nofollow"
return response
app.after_request(apply_robot_tag)
| 1,436 | 36.815789 | 98 | py |
airflow | airflow-main/airflow/www/extensions/init_views.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import warnings
from functools import cached_property
from os import path
from connexion import FlaskApi, ProblemException, Resolver
from connexion.decorators.validation import RequestBodyValidator
from connexion.exceptions import BadRequestProblem
from flask import Flask, request
from airflow.api_connexion.exceptions import common_error_handler
from airflow.configuration import conf
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.security import permissions
from airflow.utils.yaml import safe_load
log = logging.getLogger(__name__)
# airflow/www/extensions/init_views.py => airflow/
ROOT_APP_DIR = path.abspath(path.join(path.dirname(__file__), path.pardir, path.pardir))
def init_flash_views(app):
"""Init main app view - redirect to FAB."""
from airflow.www.blueprints import routes
app.register_blueprint(routes)
def init_appbuilder_views(app):
"""Initialize Web UI views."""
from airflow.models import import_all_models
import_all_models()
from airflow.www import views
appbuilder = app.appbuilder
# Remove the session from scoped_session registry to avoid
# reusing a session with a disconnected connection
appbuilder.session.remove()
appbuilder.add_view_no_menu(views.AutocompleteView())
appbuilder.add_view_no_menu(views.Airflow())
appbuilder.add_view(
views.DagRunModelView,
permissions.RESOURCE_DAG_RUN,
category=permissions.RESOURCE_BROWSE_MENU,
category_icon="fa-globe",
)
appbuilder.add_view(
views.JobModelView, permissions.RESOURCE_JOB, category=permissions.RESOURCE_BROWSE_MENU
)
appbuilder.add_view(
views.LogModelView, permissions.RESOURCE_AUDIT_LOG, category=permissions.RESOURCE_BROWSE_MENU
)
appbuilder.add_view(
views.VariableModelView, permissions.RESOURCE_VARIABLE, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.TaskInstanceModelView,
permissions.RESOURCE_TASK_INSTANCE,
category=permissions.RESOURCE_BROWSE_MENU,
)
appbuilder.add_view(
views.TaskRescheduleModelView,
permissions.RESOURCE_TASK_RESCHEDULE,
category=permissions.RESOURCE_BROWSE_MENU,
)
appbuilder.add_view(
views.TriggerModelView,
permissions.RESOURCE_TRIGGER,
category=permissions.RESOURCE_BROWSE_MENU,
)
appbuilder.add_view(
views.ConfigurationView,
permissions.RESOURCE_CONFIG,
category=permissions.RESOURCE_ADMIN_MENU,
category_icon="fa-user",
)
appbuilder.add_view(
views.ConnectionModelView, permissions.RESOURCE_CONNECTION, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.SlaMissModelView, permissions.RESOURCE_SLA_MISS, category=permissions.RESOURCE_BROWSE_MENU
)
appbuilder.add_view(
views.PluginView, permissions.RESOURCE_PLUGIN, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.ProviderView, permissions.RESOURCE_PROVIDER, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.PoolModelView, permissions.RESOURCE_POOL, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.XComModelView, permissions.RESOURCE_XCOM, category=permissions.RESOURCE_ADMIN_MENU
)
appbuilder.add_view(
views.DagDependenciesView,
permissions.RESOURCE_DAG_DEPENDENCIES,
category=permissions.RESOURCE_BROWSE_MENU,
)
# add_view_no_menu to change item position.
# I added link in extensions.init_appbuilder_links.init_appbuilder_links
appbuilder.add_view_no_menu(views.RedocView)
def init_plugins(app):
"""Integrate Flask and FAB with plugins."""
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
appbuilder = app.appbuilder
for view in plugins_manager.flask_appbuilder_views:
name = view.get("name")
if name:
log.debug("Adding view %s with menu", name)
appbuilder.add_view(view["view"], name, category=view["category"])
else:
# if 'name' key is missing, intent is to add view without menu
log.debug("Adding view %s without menu", str(type(view["view"])))
appbuilder.add_view_no_menu(view["view"])
for menu_link in sorted(
plugins_manager.flask_appbuilder_menu_links, key=lambda x: (x.get("category", ""), x["name"])
):
log.debug("Adding menu link %s to %s", menu_link["name"], menu_link["href"])
appbuilder.add_link(**menu_link)
for blue_print in plugins_manager.flask_blueprints:
log.debug("Adding blueprint %s:%s", blue_print["name"], blue_print["blueprint"].import_name)
app.register_blueprint(blue_print["blueprint"])
def init_error_handlers(app: Flask):
"""Add custom errors handlers."""
from airflow.www import views
app.register_error_handler(500, views.show_traceback)
app.register_error_handler(404, views.not_found)
def set_cors_headers_on_response(response):
"""Add response headers."""
allow_headers = conf.get("api", "access_control_allow_headers")
allow_methods = conf.get("api", "access_control_allow_methods")
allow_origins = conf.get("api", "access_control_allow_origins")
if allow_headers:
response.headers["Access-Control-Allow-Headers"] = allow_headers
if allow_methods:
response.headers["Access-Control-Allow-Methods"] = allow_methods
if allow_origins == "*":
response.headers["Access-Control-Allow-Origin"] = "*"
elif allow_origins:
allowed_origins = allow_origins.split(" ")
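        # Fall back to the first allowed origin when the request carries no Origin header.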
origin = request.environ.get("HTTP_ORIGIN", allowed_origins[0])
if origin in allowed_origins:
response.headers["Access-Control-Allow-Origin"] = origin
return response
class _LazyResolution:
"""OpenAPI endpoint that lazily resolves the function on first use.
This is a stand-in replacement for ``connexion.Resolution`` that implements
its public attributes ``function`` and ``operation_id``, but the function
is only resolved when it is first accessed.
"""
def __init__(self, resolve_func, operation_id):
self._resolve_func = resolve_func
self.operation_id = operation_id
@cached_property
def function(self):
return self._resolve_func(self.operation_id)
class _LazyResolver(Resolver):
"""OpenAPI endpoint resolver that loads lazily on first use.
This re-implements ``connexion.Resolver.resolve()`` to not eagerly resolve
the endpoint function (and thus avoid importing it in the process), but only
return a placeholder that will be actually resolved when the contained
function is accessed.
"""
def resolve(self, operation):
operation_id = self.resolve_operation_id(operation)
return _LazyResolution(self.resolve_function_from_operation_id, operation_id)
class _CustomErrorRequestBodyValidator(RequestBodyValidator):
"""Custom request body validator that overrides error messages.
    By default, Connexion emits a very generic *None is not of type 'object'*
    error when receiving an empty request body (with the view specifying the
    body as non-nullable). We override it to provide a more useful message.
"""
def validate_schema(self, data, url):
if not self.is_null_value_valid and data is None:
raise BadRequestProblem(detail="Request body must not be empty")
return super().validate_schema(data, url)
def init_api_connexion(app: Flask) -> None:
"""Initialize Stable API."""
base_path = "/api/v1"
from airflow.www import views
@app.errorhandler(404)
def _handle_api_not_found(ex):
if request.path.startswith(base_path):
            # 404 errors are never handled at the blueprint level unless raised
            # from a view function, so actual 404 errors (i.e. requests for which
            # no route is defined) need to be handled here at the application level
return common_error_handler(ex)
else:
return views.not_found(ex)
@app.errorhandler(405)
def _handle_method_not_allowed(ex):
if request.path.startswith(base_path):
return common_error_handler(ex)
else:
return views.method_not_allowed(ex)
with open(path.join(ROOT_APP_DIR, "api_connexion", "openapi", "v1.yaml")) as f:
specification = safe_load(f)
api_bp = FlaskApi(
specification=specification,
resolver=_LazyResolver(),
base_path=base_path,
options={
"swagger_ui": conf.getboolean("webserver", "enable_swagger_ui", fallback=True),
"swagger_path": path.join(ROOT_APP_DIR, "www", "static", "dist", "swagger-ui"),
},
strict_validation=True,
validate_responses=True,
validator_map={"body": _CustomErrorRequestBodyValidator},
).blueprint
api_bp.after_request(set_cors_headers_on_response)
app.register_blueprint(api_bp)
app.register_error_handler(ProblemException, common_error_handler)
app.extensions["csrf"].exempt(api_bp)
def init_api_internal(app: Flask, standalone_api: bool = False) -> None:
"""Initialize Internal API."""
if not standalone_api and not conf.getboolean("webserver", "run_internal_api", fallback=False):
return
with open(path.join(ROOT_APP_DIR, "api_internal", "openapi", "internal_api_v1.yaml")) as f:
specification = safe_load(f)
api_bp = FlaskApi(
specification=specification,
base_path="/internal_api/v1",
options={"swagger_ui": conf.getboolean("webserver", "enable_swagger_ui", fallback=True)},
strict_validation=True,
validate_responses=True,
).blueprint
api_bp.after_request(set_cors_headers_on_response)
app.register_blueprint(api_bp)
app.after_request_funcs.setdefault(api_bp.name, []).append(set_cors_headers_on_response)
app.extensions["csrf"].exempt(api_bp)
def init_api_experimental(app):
"""Initialize Experimental API."""
if not conf.getboolean("api", "enable_experimental_api", fallback=False):
return
from airflow.www.api.experimental import endpoints
warnings.warn(
"The experimental REST API is deprecated. Please migrate to the stable REST API. "
"Please note that the experimental API do not have access control. "
"The authenticated user has full access.",
RemovedInAirflow3Warning,
)
app.register_blueprint(endpoints.api_experimental, url_prefix="/api/experimental")
app.extensions["csrf"].exempt(endpoints.api_experimental)
| 11,566 | 36.555195 | 108 | py |
airflow | airflow-main/airflow/www/extensions/init_session.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask import session as builtin_flask_session
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.www.session import AirflowDatabaseSessionInterface, AirflowSecureCookieSessionInterface
def init_airflow_session_interface(app):
"""Set airflow session interface."""
config = app.config.copy()
selected_backend = conf.get("webserver", "SESSION_BACKEND")
# A bit of a misnomer - normally cookies expire whenever the browser is closed
# or when they hit their expiry datetime, whichever comes first. "Permanent"
# cookies only expire when they hit their expiry datetime, and can outlive
# the browser being closed.
permanent_cookie = config.get("SESSION_PERMANENT", True)
if selected_backend == "securecookie":
app.session_interface = AirflowSecureCookieSessionInterface()
if permanent_cookie:
def make_session_permanent():
builtin_flask_session.permanent = True
app.before_request(make_session_permanent)
elif selected_backend == "database":
app.session_interface = AirflowDatabaseSessionInterface(
app=app,
db=None,
permanent=permanent_cookie,
# Typically these would be configurable with Flask-Session,
# but we will set them explicitly instead as they don't make
# sense to have configurable in Airflow's use case
table="session",
key_prefix="",
use_signer=True,
)
else:
raise AirflowConfigException(
"Unrecognized session backend specified in "
f"web_server_session_backend: '{selected_backend}'. Please set "
"this to either 'database' or 'securecookie'."
)
| 2,621 | 41.290323 | 100 | py |