response | instruction
---|---
Set the dag run's state to running.
Set the dag run for a specific execution date, and its task instances, to running. | def set_dag_run_state_to_running(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the dag run's state to running.
Set the dag run for a specific execution date, and its task instances, to running.
"""
return __set_dag_run_state_to_running_or_queued(
new_state=DagRunState.RUNNING,
dag=dag,
execution_date=execution_date,
run_id=run_id,
commit=commit,
session=session,
) |
Set the dag run's state to queued.
Set the dag run for a specific execution date, and its task instances, to queued. | def set_dag_run_state_to_queued(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the dag run's state to queued.
Set the dag run for a specific execution date, and its task instances, to queued.
"""
return __set_dag_run_state_to_running_or_queued(
new_state=DagRunState.QUEUED,
dag=dag,
execution_date=execution_date,
run_id=run_id,
commit=commit,
session=session,
) |
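Both helpers above are thin wrappers around the shared private __set_dag_run_state_to_running_or_queued implementation. A minimal usage sketch, assuming the Airflow 2.x layout where they are importable from airflow.api.common.mark_tasks and that a DAG called "example_dag" is already serialized to the metadata database:

from airflow.api.common.mark_tasks import set_dag_run_state_to_queued
from airflow.models import DagBag

dag = DagBag(read_dags_from_db=True).get_dag("example_dag")
# With commit=False (the default) nothing is written; the call only returns the
# task instances that would be altered.
altered = set_dag_run_state_to_queued(
    dag=dag,
    run_id="manual__2024-01-01T00:00:00+00:00",  # hypothetical run_id
    commit=False,
)
print(f"{len(altered)} task instances would be set to queued")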
Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags | def _trigger_dag(
dag_id: str,
dag_bag: DagBag,
run_id: str | None = None,
conf: dict | str | None = None,
execution_date: datetime | None = None,
replace_microseconds: bool = True,
) -> list[DagRun | None]:
"""Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
"""
dag = dag_bag.get_dag(dag_id) # prefetch dag if it is stored serialized
if dag is None or dag_id not in dag_bag.dags:
raise DagNotFound(f"Dag id {dag_id} not found")
execution_date = execution_date or timezone.utcnow()
if not timezone.is_localized(execution_date):
raise ValueError("The execution_date should be localized")
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if dag.default_args and "start_date" in dag.default_args:
min_dag_start_date = dag.default_args["start_date"]
if min_dag_start_date and execution_date < min_dag_start_date:
raise ValueError(
f"The execution_date [{execution_date.isoformat()}] should be >= start_date "
f"[{min_dag_start_date.isoformat()}] from DAG's default_args"
)
logical_date = timezone.coerce_datetime(execution_date)
data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date)
run_id = run_id or dag.timetable.generate_run_id(
run_type=DagRunType.MANUAL, logical_date=logical_date, data_interval=data_interval
)
dag_run = DagRun.find_duplicate(dag_id=dag_id, execution_date=execution_date, run_id=run_id)
if dag_run:
raise DagRunAlreadyExists(dag_run=dag_run, execution_date=execution_date, run_id=run_id)
run_conf = None
if conf:
run_conf = conf if isinstance(conf, dict) else json.loads(conf)
dag_runs = []
dags_to_run = [dag, *dag.subdags]
for _dag in dags_to_run:
dag_run = _dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=DagRunState.QUEUED,
conf=run_conf,
external_trigger=True,
dag_hash=dag_bag.dags_hash.get(dag_id),
data_interval=data_interval,
)
dag_runs.append(dag_run)
return dag_runs |
Triggers execution of DAG specified by dag_id.
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: first dag run triggered, even if more than one Dag Run was triggered, or None | def trigger_dag(
dag_id: str,
run_id: str | None = None,
conf: dict | str | None = None,
execution_date: datetime | None = None,
replace_microseconds: bool = True,
) -> DagRun | None:
"""Triggers execution of DAG specified by dag_id.
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: first dag run triggered, even if more than one Dag Run was triggered, or None
"""
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {dag_id} not found in DagModel")
dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
triggers = _trigger_dag(
dag_id=dag_id,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
execution_date=execution_date,
replace_microseconds=replace_microseconds,
)
return triggers[0] if triggers else None |
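A hedged usage sketch for trigger_dag, assuming it is importable from airflow.api.common.trigger_dag (as in Airflow 2.x); the dag_id and conf payload are placeholders:

from airflow.api.common.trigger_dag import trigger_dag

dag_run = trigger_dag(
    dag_id="example_dag",
    conf={"param": "value"},   # may also be passed as a JSON string
    replace_microseconds=True,
)
if dag_run is not None:
    print(dag_run.run_id, dag_run.state)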
Return python code of a given dag_id.
:param dag_id: DAG id
:return: code of the DAG | def get_code(dag_id: str) -> str:
"""Return python code of a given dag_id.
:param dag_id: DAG id
:return: code of the DAG
"""
dag = check_and_get_dag(dag_id=dag_id)
try:
return DagCode.get_code_by_fileloc(dag.fileloc)
except (OSError, DagCodeNotFound) as exception:
error_message = f"Error {exception} while reading Dag id {dag_id} Code"
raise AirflowException(error_message, exception) |
Return a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified | def get_dag_runs(dag_id: str, state: str | None = None) -> list[dict[str, Any]]:
"""
Return a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
"""
check_and_get_dag(dag_id=dag_id)
dag_runs = []
state = DagRunState(state.lower()) if state else None
for run in DagRun.find(dag_id=dag_id, state=state):
dag_runs.append(
{
"id": run.id,
"run_id": run.run_id,
"state": run.state,
"dag_id": run.dag_id,
"execution_date": run.execution_date.isoformat(),
"start_date": ((run.start_date or "") and run.start_date.isoformat()),
"dag_run_url": url_for("Airflow.graph", dag_id=run.dag_id, execution_date=run.execution_date),
}
)
return dag_runs |
Return the Dag Run state identified by the given dag_id and execution_date.
:param dag_id: DAG id
:param execution_date: execution date
:return: Dictionary storing state of the object | def get_dag_run_state(dag_id: str, execution_date: datetime) -> dict[str, str]:
"""Return the Dag Run state identified by the given dag_id and execution_date.
:param dag_id: DAG id
:param execution_date: execution date
:return: Dictionary storing state of the object
"""
dag = check_and_get_dag(dag_id=dag_id)
dagrun = check_and_get_dagrun(dag, execution_date)
return {"state": dagrun.get_state()} |
Get lineage information for dag specified. | def get_lineage(
dag_id: str, execution_date: datetime.datetime, *, session: Session = NEW_SESSION
) -> dict[str, dict[str, Any]]:
"""Get lineage information for dag specified."""
dag = check_and_get_dag(dag_id)
dagrun = check_and_get_dagrun(dag, execution_date)
inlets = XCom.get_many(dag_ids=dag_id, run_id=dagrun.run_id, key=PIPELINE_INLETS, session=session)
outlets = XCom.get_many(dag_ids=dag_id, run_id=dagrun.run_id, key=PIPELINE_OUTLETS, session=session)
lineage: dict[str, dict[str, Any]] = defaultdict(dict)
for meta in inlets:
lineage[meta.task_id]["inlets"] = meta.value
for meta in outlets:
lineage[meta.task_id]["outlets"] = meta.value
return {"task_ids": dict(lineage)} |
Return the task object identified by the given dag_id and task_id. | def get_task(dag_id: str, task_id: str) -> TaskInstance:
"""Return the task object identified by the given dag_id and task_id."""
dag = check_and_get_dag(dag_id, task_id)
# Return the task.
return dag.get_task(task_id) |
Return the task instance identified by the given dag_id, task_id and execution_date. | def get_task_instance(dag_id: str, task_id: str, execution_date: datetime) -> TaskInstance:
"""Return the task instance identified by the given dag_id, task_id and execution_date."""
dag = check_and_get_dag(dag_id, task_id)
dagrun = check_and_get_dagrun(dag=dag, execution_date=execution_date)
# Get task instance object and check that it exists
task_instance = dagrun.get_task_instance(task_id)
if not task_instance:
error_message = f"Task {task_id} instance for date {execution_date} not found"
raise TaskInstanceNotFound(error_message)
# API methods have access to the database.
if isinstance(task_instance, TaskInstance):
return task_instance
raise ValueError("not a TaskInstance") |
Get pool by a given name. | def get_pool(name, session: Session = NEW_SESSION):
"""Get pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
raise PoolNotFound(f"Pool '{name}' doesn't exist")
return pool |
Get all pools. | def get_pools(session: Session = NEW_SESSION):
"""Get all pools."""
return session.scalars(select(Pool)).all() |
Create a pool with given parameters. | def create_pool(name, slots, description, session: Session = NEW_SESSION):
"""Create a pool with given parameters."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest(f"Bad value for `slots`: {slots}")
# Get the length of the pool column
pool_name_length = Pool.pool.property.columns[0].type.length
if len(name) > pool_name_length:
raise AirflowBadRequest(f"Pool name can't be more than {pool_name_length} characters")
session.expire_on_commit = False
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
pool = Pool(pool=name, slots=slots, description=description, include_deferred=False)
session.add(pool)
else:
pool.slots = slots
pool.description = description
session.commit()
return pool |
Delete pool by a given name. | def delete_pool(name, session: Session = NEW_SESSION):
"""Delete pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
if name == Pool.DEFAULT_POOL_NAME:
raise AirflowBadRequest(f"{Pool.DEFAULT_POOL_NAME} cannot be deleted")
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
raise PoolNotFound(f"Pool '{name}' doesn't exist")
session.delete(pool)
session.commit()
return pool |
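A short usage sketch of the pool helpers above, assuming they live in airflow.api.common.experimental.pool (their historical location) and that @provide_session supplies the session argument; the pool name and sizes are illustrative:

from airflow.api.common.experimental.pool import create_pool, delete_pool, get_pool

pool = create_pool(name="etl_pool", slots=8, description="Pool for ETL tasks")
print(get_pool(name="etl_pool").slots)  # -> 8
delete_pool(name="etl_pool")            # returns the deleted Pool row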
Check that the DAG exists and, if a task_id is given, that the task exists in it. | def check_and_get_dag(dag_id: str, task_id: str | None = None) -> DagModel:
"""Check DAG existence and in case it is specified that Task exists."""
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {dag_id} not found in DagModel")
dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
dag = dagbag.get_dag(dag_id)
if not dag:
error_message = f"Dag id {dag_id} not found"
raise DagNotFound(error_message)
if task_id and not dag.has_task(task_id):
error_message = f"Task {task_id} not found in dag {dag_id}"
raise TaskNotFound(error_message)
return dag |
Get DagRun object and check that it exists. | def check_and_get_dagrun(dag: DagModel, execution_date: datetime) -> DagRun:
"""Get DagRun object and check that it exists."""
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = f"Dag Run for date {execution_date} not found in dag {dag.dag_id}"
raise DagRunNotFound(error_message)
return dagrun |
Use to capture connexion exceptions and add link to the type field. | def common_error_handler(exception: BaseException) -> flask.Response:
"""Use to capture connexion exceptions and add link to the type field."""
if isinstance(exception, ProblemException):
link = EXCEPTIONS_LINK_MAP.get(exception.status)
if link:
response = problem(
status=exception.status,
title=exception.title,
detail=exception.detail,
type=link,
instance=exception.instance,
headers=exception.headers,
ext=exception.ext,
)
else:
response = problem(
status=exception.status,
title=exception.title,
detail=exception.detail,
type=exception.type,
instance=exception.instance,
headers=exception.headers,
ext=exception.ext,
)
else:
if not isinstance(exception, werkzeug.exceptions.HTTPException):
exception = werkzeug.exceptions.InternalServerError()
response = problem(title=exception.name, detail=exception.description, status=exception.code)
return FlaskApi.get_response(response) |
Validate that a datetime is not naive. | def validate_istimezone(value: datetime) -> None:
"""Validate that a datetime is not naive."""
if not value.tzinfo:
raise BadRequest("Invalid datetime format", detail="Naive datetime is disallowed") |
Format datetime objects.
Datetime format parser for args since connexion doesn't parse datetimes
https://github.com/zalando/connexion/issues/476
This should only be used within Connexion views because it raises a 400 | def format_datetime(value: str) -> datetime:
"""
Format datetime objects.
Datetime format parser for args since connexion doesn't parse datetimes
https://github.com/zalando/connexion/issues/476
This should only be used within Connexion views because it raises a 400
"""
value = value.strip()
if value[-1] != "Z":
value = value.replace(" ", "+")
try:
return timezone.parse(value)
except (ParserError, TypeError) as err:
raise BadRequest("Incorrect datetime argument", detail=str(err)) |
Check that the limit does not exceed the configured value.
This checks the limit passed to the view and raises BadRequest if the
limit exceeds the user-configured value. | def check_limit(value: int) -> int:
"""
Check that the limit does not exceed the configured value.
This checks the limit passed to the view and raises BadRequest if the
limit exceeds the user-configured value.
"""
max_val = conf.getint("api", "maximum_page_limit") # user configured max page limit
fallback = conf.getint("api", "fallback_page_limit")
if value > max_val:
log.warning(
"The limit param value %s passed in API exceeds the configured maximum page limit %s",
value,
max_val,
)
return max_val
if value == 0:
return fallback
if value < 0:
raise BadRequest("Page limit must be a positive integer")
return value |
Create a decorator to convert parameters using given formatters.
Using it allows you to separate parameter formatting from endpoint logic.
:param params_formatters: Map of key name and formatter function | def format_parameters(params_formatters: dict[str, Callable[[Any], Any]]) -> Callable[[T], T]:
"""
Create a decorator to convert parameters using given formatters.
Using it allows you to separate parameter formatting from endpoint logic.
:param params_formatters: Map of key name and formatter function
"""
def format_parameters_decorator(func: T) -> T:
@wraps(func)
def wrapped_function(*args, **kwargs):
for key, formatter in params_formatters.items():
if key in kwargs:
kwargs[key] = formatter(kwargs[key])
return func(*args, **kwargs)
return cast(T, wrapped_function)
return format_parameters_decorator |
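A sketch of how format_parameters is typically combined with the other helpers above; the endpoint body here is hypothetical, but the decorator pattern matches how the airflow/api_connexion endpoints use it:

@format_parameters({"limit": check_limit, "execution_date": format_datetime})
def list_things(*, limit: int, execution_date=None, offset: int = 0):
    # By the time the body runs, `limit` has been capped/defaulted and
    # `execution_date` (if supplied as a string) is a timezone-aware datetime.
    return {"limit": limit, "offset": offset}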
Apply sorting to query. | def apply_sorting(
query: Select,
order_by: str,
to_replace: dict[str, str] | None = None,
allowed_attrs: Container[str] | None = None,
) -> Select:
"""Apply sorting to query."""
lstriped_orderby = order_by.lstrip("-")
if allowed_attrs and lstriped_orderby not in allowed_attrs:
raise BadRequest(
detail=f"Ordering with '{lstriped_orderby}' is disallowed or "
f"the attribute does not exist on the model"
)
if to_replace:
lstriped_orderby = to_replace.get(lstriped_orderby, lstriped_orderby)
if order_by[0] == "-":
order_by = f"{lstriped_orderby} desc"
else:
order_by = f"{lstriped_orderby} asc"
return query.order_by(text(order_by)) |
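An illustrative call to apply_sorting, using the Log model the event-log endpoints below query; the field mapping and allow-list are assumptions for the sketch:

from sqlalchemy import select
from airflow.models.log import Log

query = apply_sorting(
    select(Log),
    order_by="-when",                    # leading "-" requests descending order
    to_replace={"when": "dttm"},         # public field name -> actual column
    allowed_attrs=["when", "dag_id", "event"],
)
# Roughly: SELECT ... FROM log ORDER BY dttm desc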
Check that the request has valid authorization information. | def check_authentication() -> None:
"""Check that the request has valid authorization information."""
for auth in get_airflow_app().api_auth:
response = auth.requires_authentication(Response)()
if response.status_code == 200:
return
# Even if the current_user is anonymous, the AUTH_ROLE_PUBLIC might still have permission.
appbuilder = get_airflow_app().appbuilder
if appbuilder.get_app.config.get("AUTH_ROLE_PUBLIC", None):
return
# since this handler only checks authentication, not authorization,
# we should always return 401
raise Unauthenticated(headers=response.headers) |
Check current user's permissions against required permissions.
Deprecated. Do not use this decorator, use one of the decorators `has_access_*` defined in
airflow/api_connexion/security.py instead.
This decorator will only work with FAB authentication and not with other auth providers.
This decorator might be used in user plugins, do not remove it. | def requires_access(permissions: Sequence[tuple[str, str]] | None = None) -> Callable[[T], T]:
"""
Check current user's permissions against required permissions.
Deprecated. Do not use this decorator, use one of the decorators `has_access_*` defined in
airflow/api_connexion/security.py instead.
This decorator will only work with FAB authentication and not with other auth providers.
This decorator might be used in user plugins, do not remove it.
"""
warnings.warn(
"The 'requires_access' decorator is deprecated. Please use one of the decorator `requires_access_*`"
"defined in airflow/api_connexion/security.py instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
from airflow.providers.fab.auth_manager.decorators.auth import _requires_access_fab
return _requires_access_fab(permissions) |
Define the behavior depending on whether the user is authorized to access the resource.
:param is_authorized_callback: callback to execute to figure whether the user is authorized to access
the resource
:param func: the function to call if the user is authorized
:param args: the arguments of ``func``
:param kwargs: the keyword arguments of ``func``
:meta private: | def _requires_access(*, is_authorized_callback: Callable[[], bool], func: Callable, args, kwargs) -> bool:
"""
Define the behavior depending on whether the user is authorized to access the resource.
:param is_authorized_callback: callback to execute to figure whether the user is authorized to access
the resource
:param func: the function to call if the user is authorized
:param args: the arguments of ``func``
:param kwargs: the keyword arguments of ``func``
:meta private:
"""
check_authentication()
if is_authorized_callback():
return func(*args, **kwargs)
raise PermissionDenied() |
Convert config dict to a Config object. | def _conf_dict_to_config(conf_dict: dict) -> Config:
"""Convert config dict to a Config object."""
config = Config(
sections=[
ConfigSection(
name=section, options=[ConfigOption(key=key, value=value) for key, value in options.items()]
)
for section, options in conf_dict.items()
]
)
return config |
Convert a single config option to text. | def _option_to_text(config_option: ConfigOption) -> str:
"""Convert a single config option to text."""
return f"{config_option.key} = {config_option.value}" |
Convert a single config section to text. | def _section_to_text(config_section: ConfigSection) -> str:
"""Convert a single config section to text."""
return (
f"[{config_section.name}]{LINE_SEP}"
f"{LINE_SEP.join(_option_to_text(option) for option in config_section.options)}{LINE_SEP}"
) |
Convert the entire config to text. | def _config_to_text(config: Config) -> str:
"""Convert the entire config to text."""
return LINE_SEP.join(_section_to_text(s) for s in config.sections) |
Convert a Config object to a JSON formatted string. | def _config_to_json(config: Config) -> str:
"""Convert a Config object to a JSON formatted string."""
return json.dumps(config_schema.dump(config), indent=4) |
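A minimal sketch of these private config serializers, assuming Config, ConfigSection and ConfigOption are the containers from airflow.api_connexion.schemas.config_schema:

from airflow.api_connexion.schemas.config_schema import (
    Config,
    ConfigOption,
    ConfigSection,
)

cfg = Config(
    sections=[
        ConfigSection(name="core", options=[ConfigOption(key="parallelism", value="32")])
    ]
)
print(_config_to_text(cfg))
# [core]
# parallelism = 32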
Get current configuration. | def get_config(*, section: str | None = None) -> Response:
"""Get current configuration."""
serializer = {
"text/plain": _config_to_text,
"application/json": _config_to_json,
}
return_type = request.accept_mimetypes.best_match(serializer.keys())
if conf.get("webserver", "expose_config").lower() == "non-sensitive-only":
expose_config = True
display_sensitive = False
else:
expose_config = conf.getboolean("webserver", "expose_config")
display_sensitive = True
if return_type not in serializer:
return Response(status=HTTPStatus.NOT_ACCEPTABLE)
elif expose_config:
if section and not conf.has_section(section):
raise NotFound("section not found.", detail=f"section={section} not found.")
conf_dict = conf.as_dict(display_source=False, display_sensitive=display_sensitive)
if section:
conf_section_value = conf_dict[section]
conf_dict.clear()
conf_dict[section] = conf_section_value
config = _conf_dict_to_config(conf_dict)
config_text = serializer[return_type](config)
return Response(config_text, headers={"Content-Type": return_type})
else:
raise PermissionDenied(
detail=(
"Your Airflow administrator chose not to expose the configuration, most likely for security"
" reasons."
)
) |
Delete a connection entry. | def delete_connection(*, connection_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a connection entry."""
connection = session.scalar(select(Connection).filter_by(conn_id=connection_id))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
session.delete(connection)
return NoContent, HTTPStatus.NO_CONTENT |
Get a connection entry. | def get_connection(*, connection_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a connection entry."""
connection = session.scalar(select(Connection).where(Connection.conn_id == connection_id))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
return connection_schema.dump(connection) |
Get all connection entries. | def get_connections(
*,
limit: int,
offset: int = 0,
order_by: str = "id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all connection entries."""
to_replace = {"connection_id": "conn_id"}
allowed_sort_attrs = ["connection_id", "conn_type", "description", "host", "port", "id"]
total_entries = session.execute(select(func.count(Connection.id))).scalar_one()
query = select(Connection)
query = apply_sorting(query, order_by, to_replace, allowed_sort_attrs)
connections = session.scalars(query.offset(offset).limit(limit)).all()
return connection_collection_schema.dump(
ConnectionCollection(connections=connections, total_entries=total_entries)
) |
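The connection endpoints above are served under the stable REST API; a hedged client-side sketch follows (host, credentials and auth scheme are placeholders):

import requests

resp = requests.get(
    "http://localhost:8080/api/v1/connections",
    params={"limit": 10, "order_by": "-connection_id"},
    auth=("admin", "admin"),  # assumes basic-auth is enabled
)
resp.raise_for_status()
print(resp.json()["total_entries"])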
Update a connection entry. | def patch_connection(
*,
connection_id: str,
update_mask: UpdateMask = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Update a connection entry."""
try:
data = connection_schema.load(request.json, partial=True)
except ValidationError as err:
# If validation gets to here, it is extra field validation.
raise BadRequest(detail=str(err.messages))
non_update_fields = ["connection_id", "conn_id"]
connection = session.scalar(select(Connection).filter_by(conn_id=connection_id).limit(1))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
if data.get("conn_id") and connection.conn_id != data["conn_id"]:
raise BadRequest(detail="The connection_id cannot be updated.")
if update_mask:
data = extract_update_mask_data(update_mask, non_update_fields, data)
for key in data:
setattr(connection, key, data[key])
session.add(connection)
session.commit()
return connection_schema.dump(connection) |
Create connection entry. | def post_connection(*, session: Session = NEW_SESSION) -> APIResponse:
"""Create connection entry."""
body = request.json
try:
data = connection_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
conn_id = data["conn_id"]
try:
helpers.validate_key(conn_id, max_length=200)
except Exception as e:
raise BadRequest(detail=str(e))
connection = session.scalar(select(Connection).filter_by(conn_id=conn_id).limit(1))
if not connection:
connection = Connection(**data)
session.add(connection)
session.commit()
return connection_schema.dump(connection)
raise AlreadyExists(detail=f"Connection already exist. ID: {conn_id}") |
Test an API connection.
This method first creates an in-memory transient conn_id and exports it to an
env var, as some hook classes try to find the connection from their __init__ method and error out
if it is not found. It also deletes the conn_id env variable after the test. | def test_connection() -> APIResponse:
"""
Test an API connection.
This method first creates an in-memory transient conn_id and exports it to an
env var, as some hook classes try to find the connection from their __init__ method and error out
if it is not found. It also deletes the conn_id env variable after the test.
"""
if conf.get("core", "test_connection", fallback="Disabled").lower().strip() != "enabled":
return Response(
"Testing connections is disabled in Airflow configuration. Contact your deployment admin to "
"enable it.",
403,
)
body = request.json
transient_conn_id = get_random_string()
conn_env_var = f"{CONN_ENV_PREFIX}{transient_conn_id.upper()}"
try:
data = connection_schema.load(body)
data["conn_id"] = transient_conn_id
conn = Connection(**data)
os.environ[conn_env_var] = conn.get_uri()
status, message = conn.test_connection()
return connection_test_schema.dump({"status": status, "message": message})
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
finally:
os.environ.pop(conn_env_var, None) |
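A hedged sketch of exercising test_connection over the REST API; it requires [core] test_connection = Enabled, and the host, credentials and connection payload are illustrative only:

import requests

resp = requests.post(
    "http://localhost:8080/api/v1/connections/test",
    auth=("admin", "admin"),
    json={"connection_id": "scratch_conn", "conn_type": "http", "host": "https://example.com"},
)
print(resp.json())  # e.g. {"status": true, "message": "..."}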
Get basic information about a DAG. | def get_dag(
*, dag_id: str, fields: Collection[str] | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get basic information about a DAG."""
dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
if dag is None:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
try:
dag_schema = DAGSchema(only=fields) if fields else DAGSchema()
except ValueError as e:
raise BadRequest("DAGSchema init error", detail=str(e))
return dag_schema.dump(
dag,
) |
Get details of DAG. | def get_dag_details(
*, dag_id: str, fields: Collection[str] | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get details of DAG."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
dag_model: DagModel = session.get(DagModel, dag_id)
for key, value in dag.__dict__.items():
if not key.startswith("_") and not hasattr(dag_model, key):
setattr(dag_model, key, value)
try:
dag_detail_schema = DAGDetailSchema(only=fields) if fields else DAGDetailSchema()
except ValueError as e:
raise BadRequest("DAGDetailSchema init error", detail=str(e))
return dag_detail_schema.dump(dag_model) |
Get all DAGs. | def get_dags(
*,
limit: int,
offset: int = 0,
tags: Collection[str] | None = None,
dag_id_pattern: str | None = None,
only_active: bool = True,
paused: bool | None = None,
order_by: str = "dag_id",
fields: Collection[str] | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all DAGs."""
allowed_attrs = ["dag_id"]
dags_query = select(DagModel).where(~DagModel.is_subdag)
if only_active:
dags_query = dags_query.where(DagModel.is_active)
if paused is not None:
if paused:
dags_query = dags_query.where(DagModel.is_paused)
else:
dags_query = dags_query.where(~DagModel.is_paused)
if dag_id_pattern:
dags_query = dags_query.where(DagModel.dag_id.ilike(f"%{dag_id_pattern}%"))
readable_dags = get_auth_manager().get_permitted_dag_ids(user=g.user)
dags_query = dags_query.where(DagModel.dag_id.in_(readable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.where(or_(*cond))
total_entries = get_query_count(dags_query, session=session)
dags_query = apply_sorting(dags_query, order_by, {}, allowed_attrs)
dags = session.scalars(dags_query.offset(offset).limit(limit)).all()
try:
dags_collection_schema = (
DAGCollectionSchema(only=[f"dags.{field}" for field in fields])
if fields
else DAGCollectionSchema()
)
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries))
except ValueError as e:
raise BadRequest("DAGCollectionSchema error", detail=str(e)) |
Update the specific DAG. | def patch_dag(*, dag_id: str, update_mask: UpdateMask = None, session: Session = NEW_SESSION) -> APIResponse:
"""Update the specific DAG."""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ["is_paused"]:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
patch_body_[update_mask[0]] = patch_body[update_mask[0]]
patch_body = patch_body_
dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
if not dag:
raise NotFound(f"Dag with id: '{dag_id}' not found")
dag.is_paused = patch_body["is_paused"]
session.flush()
return dag_schema.dump(dag) |
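Client-side sketch of patch_dag via the stable REST API (only is_paused may be updated, as enforced above); host and credentials are placeholders:

import requests

resp = requests.patch(
    "http://localhost:8080/api/v1/dags/example_dag",
    params={"update_mask": "is_paused"},
    json={"is_paused": True},
    auth=("admin", "admin"),
)
print(resp.json()["is_paused"])  # -> True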
Patch multiple DAGs. | def patch_dags(limit, session, offset=0, only_active=True, tags=None, dag_id_pattern=None, update_mask=None):
"""Patch multiple DAGs."""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ["is_paused"]:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
update_mask = update_mask[0]
patch_body_[update_mask] = patch_body[update_mask]
patch_body = patch_body_
if only_active:
dags_query = select(DagModel).where(~DagModel.is_subdag, DagModel.is_active)
else:
dags_query = select(DagModel).where(~DagModel.is_subdag)
if dag_id_pattern == "~":
dag_id_pattern = "%"
dags_query = dags_query.where(DagModel.dag_id.ilike(f"%{dag_id_pattern}%"))
editable_dags = get_auth_manager().get_permitted_dag_ids(methods=["PUT"], user=g.user)
dags_query = dags_query.where(DagModel.dag_id.in_(editable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.where(or_(*cond))
total_entries = get_query_count(dags_query, session=session)
dags = session.scalars(dags_query.order_by(DagModel.dag_id).offset(offset).limit(limit)).all()
dags_to_update = {dag.dag_id for dag in dags}
session.execute(
update(DagModel)
.where(DagModel.dag_id.in_(dags_to_update))
.values(is_paused=patch_body["is_paused"])
.execution_options(synchronize_session="fetch")
)
session.flush()
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries)) |
Delete the specific DAG. | def delete_dag(dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete the specific DAG."""
from airflow.api.common import delete_dag as delete_dag_module
try:
delete_dag_module.delete_dag(dag_id, session=session)
except DagNotFound:
raise NotFound(f"Dag with id: '{dag_id}' not found")
except AirflowException:
raise AlreadyExists(detail=f"Task instances of dag with id: '{dag_id}' are still running")
return NoContent, HTTPStatus.NO_CONTENT |
Delete a DAG Run. | def delete_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a DAG Run."""
deleted_count = session.execute(
delete(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
).rowcount
if deleted_count == 0:
raise NotFound(detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found")
return NoContent, HTTPStatus.NO_CONTENT |
Get a DAG Run. | def get_dag_run(
*, dag_id: str, dag_run_id: str, fields: Collection[str] | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get a DAG Run."""
dag_run = session.scalar(select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id))
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
try:
# parse fields to Schema @post_dump
dagrun_schema = DAGRunSchema(context={"fields": fields}) if fields else DAGRunSchema()
return dagrun_schema.dump(dag_run)
except ValueError as e:
# Invalid fields
raise BadRequest("DAGRunSchema error", detail=str(e)) |
If dag run is dataset-triggered, return the dataset events that triggered it. | def get_upstream_dataset_events(
*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION
) -> APIResponse:
"""If dag run is dataset-triggered, return the dataset events that triggered it."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(
DagRun.dag_id == dag_id,
DagRun.run_id == dag_run_id,
)
)
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
events = dag_run.consumed_dataset_events
return dataset_event_collection_schema.dump(
DatasetEventCollection(dataset_events=events, total_entries=len(events))
) |
Get all DAG Runs. | def get_dag_runs(
*,
dag_id: str,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
state: list[str] | None = None,
offset: int | None = None,
limit: int | None = None,
order_by: str = "id",
fields: Collection[str] | None = None,
session: Session = NEW_SESSION,
):
"""Get all DAG Runs."""
query = select(DagRun)
# This endpoint allows specifying ~ as the dag_id to retrieve DAG Runs for all DAGs.
if dag_id == "~":
query = query.where(
DagRun.dag_id.in_(get_auth_manager().get_permitted_dag_ids(methods=["GET"], user=g.user))
)
else:
query = query.where(DagRun.dag_id == dag_id)
if state:
query = query.where(DagRun.state.in_(state))
dag_run, total_entries = _fetch_dag_runs(
query,
end_date_gte=end_date_gte,
end_date_lte=end_date_lte,
execution_date_gte=execution_date_gte,
execution_date_lte=execution_date_lte,
start_date_gte=start_date_gte,
start_date_lte=start_date_lte,
updated_at_gte=updated_at_gte,
updated_at_lte=updated_at_lte,
limit=limit,
offset=offset,
order_by=order_by,
session=session,
)
try:
dagrun_collection_schema = (
DAGRunCollectionSchema(context={"fields": fields}) if fields else DAGRunCollectionSchema()
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_run, total_entries=total_entries))
except ValueError as e:
raise BadRequest("DAGRunCollectionSchema error", detail=str(e)) |
Get list of DAG Runs. | def get_dag_runs_batch(*, session: Session = NEW_SESSION) -> APIResponse:
"""Get list of DAG Runs."""
body = get_json_request_dict()
try:
data = dagruns_batch_form_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
readable_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"], user=g.user)
query = select(DagRun)
if data.get("dag_ids"):
dag_ids = set(data["dag_ids"]) & set(readable_dag_ids)
query = query.where(DagRun.dag_id.in_(dag_ids))
else:
query = query.where(DagRun.dag_id.in_(readable_dag_ids))
states = data.get("states")
if states:
query = query.where(DagRun.state.in_(states))
dag_runs, total_entries = _fetch_dag_runs(
query,
end_date_gte=data["end_date_gte"],
end_date_lte=data["end_date_lte"],
execution_date_gte=data["execution_date_gte"],
execution_date_lte=data["execution_date_lte"],
start_date_gte=data["start_date_gte"],
start_date_lte=data["start_date_lte"],
limit=data["page_limit"],
offset=data["page_offset"],
order_by=data.get("order_by", "id"),
session=session,
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_runs, total_entries=total_entries)) |
Trigger a DAG. | def post_dag_run(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Trigger a DAG."""
dm = session.scalar(select(DagModel).where(DagModel.is_active, DagModel.dag_id == dag_id).limit(1))
if not dm:
raise NotFound(title="DAG not found", detail=f"DAG with dag_id: '{dag_id}' not found")
if dm.has_import_errors:
raise BadRequest(
title="DAG cannot be triggered",
detail=f"DAG with dag_id: '{dag_id}' has import errors",
)
try:
post_body = dagrun_schema.load(get_json_request_dict(), session=session)
except ValidationError as err:
raise BadRequest(detail=str(err))
logical_date = pendulum.instance(post_body["execution_date"])
run_id = post_body["run_id"]
dagrun_instance = session.scalar(
select(DagRun)
.where(
DagRun.dag_id == dag_id,
or_(DagRun.run_id == run_id, DagRun.execution_date == logical_date),
)
.limit(1)
)
if not dagrun_instance:
try:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
data_interval_start = post_body.get("data_interval_start")
data_interval_end = post_body.get("data_interval_end")
if data_interval_start and data_interval_end:
data_interval = DataInterval(
start=pendulum.instance(data_interval_start),
end=pendulum.instance(data_interval_end),
)
else:
data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date)
dag_run = dag.create_dagrun(
run_type=DagRunType.MANUAL,
run_id=run_id,
execution_date=logical_date,
data_interval=data_interval,
state=DagRunState.QUEUED,
conf=post_body.get("conf"),
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
session=session,
)
dag_run_note = post_body.get("note")
if dag_run_note:
current_user_id = get_auth_manager().get_user_id()
dag_run.note = (dag_run_note, current_user_id)
return dagrun_schema.dump(dag_run)
except ValueError as ve:
raise BadRequest(detail=str(ve))
if dagrun_instance.execution_date == logical_date:
raise AlreadyExists(
detail=(
f"DAGRun with DAG ID: '{dag_id}' and "
f"DAGRun logical date: '{logical_date.isoformat(sep=' ')}' already exists"
),
)
raise AlreadyExists(detail=f"DAGRun with DAG ID: '{dag_id}' and DAGRun ID: '{run_id}' already exists") |
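A hedged sketch of triggering a run through post_dag_run's route (POST /api/v1/dags/{dag_id}/dagRuns in the stable API); everything concrete here is a placeholder:

import requests

resp = requests.post(
    "http://localhost:8080/api/v1/dags/example_dag/dagRuns",
    auth=("admin", "admin"),
    json={"conf": {"param": "value"}},  # run_id/logical_date are generated if omitted
)
resp.raise_for_status()
run = resp.json()
print(run["dag_run_id"], run["state"])  # state should be "queued"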
Set a state of a dag run. | def update_dag_run_state(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set a state of a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_state_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
state = post_body["state"]
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if state == DagRunState.SUCCESS:
set_dag_run_state_to_success(dag=dag, run_id=dag_run.run_id, commit=True)
elif state == DagRunState.QUEUED:
set_dag_run_state_to_queued(dag=dag, run_id=dag_run.run_id, commit=True)
else:
set_dag_run_state_to_failed(dag=dag, run_id=dag_run.run_id, commit=True)
dag_run = session.get(DagRun, dag_run.id)
return dagrun_schema.dump(dag_run) |
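Companion sketch for update_dag_run_state (a PATCH on the dagRuns resource); the run_id shown is hypothetical:

import requests

resp = requests.patch(
    "http://localhost:8080/api/v1/dags/example_dag/dagRuns/manual_run_1",
    auth=("admin", "admin"),
    json={"state": "success"},  # allowed values: queued, success, failed
)
print(resp.json()["state"])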
Clear a dag run. | def clear_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Clear a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = clear_dagrun_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
dry_run = post_body.get("dry_run", False)
dag = get_airflow_app().dag_bag.get_dag(dag_id)
start_date = dag_run.logical_date
end_date = dag_run.logical_date
if dry_run:
task_instances = dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
dry_run=True,
)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances)
)
else:
dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
)
dag_run = session.execute(select(DagRun).where(DagRun.id == dag_run.id)).scalar_one()
return dagrun_schema.dump(dag_run) |
Set the note for a dag run. | def set_dag_run_note(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set the note for a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_note_form_schema.load(get_json_request_dict())
new_note = post_body["note"]
except ValidationError as err:
raise BadRequest(detail=str(err))
current_user_id = get_auth_manager().get_user_id()
if dag_run.dag_run_note is None:
dag_run.note = (new_note, current_user_id)
else:
dag_run.dag_run_note.content = new_note
dag_run.dag_run_note.user_id = current_user_id
session.commit()
return dagrun_schema.dump(dag_run) |
Get source code using file token. | def get_dag_source(*, file_token: str, session: Session = NEW_SESSION) -> Response:
"""Get source code using file token."""
secret_key = current_app.config["SECRET_KEY"]
auth_s = URLSafeSerializer(secret_key)
try:
path = auth_s.loads(file_token)
dag_ids = session.query(DagModel.dag_id).filter(DagModel.fileloc == path).all()
requests: Sequence[IsAuthorizedDagRequest] = [
{
"method": "GET",
"details": DagDetails(id=dag_id[0]),
}
for dag_id in dag_ids
]
# Check if user has read access to all the DAGs defined in the file
if not get_auth_manager().batch_is_authorized_dag(requests):
raise PermissionDenied()
dag_source = DagCode.code(path, session=session)
except (BadSignature, FileNotFoundError):
raise NotFound("Dag source not found")
return_type = request.accept_mimetypes.best_match(["text/plain", "application/json"])
if return_type == "text/plain":
return Response(dag_source, headers={"Content-Type": return_type})
if return_type == "application/json":
content = dag_source_schema.dumps({"content": dag_source})
return Response(content, headers={"Content-Type": return_type})
return Response("Not Allowed Accept Header", status=HTTPStatus.NOT_ACCEPTABLE) |
Get DAG warnings.
:param dag_id: the dag_id to optionally filter by
:param warning_type: the warning type to optionally filter by | def get_dag_warnings(
*,
limit: int,
dag_id: str | None = None,
warning_type: str | None = None,
offset: int | None = None,
order_by: str = "timestamp",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get DAG warnings.
:param dag_id: the dag_id to optionally filter by
:param warning_type: the warning type to optionally filter by
"""
allowed_sort_attrs = ["dag_id", "warning_type", "message", "timestamp"]
query = select(DagWarningModel)
if dag_id:
query = query.where(DagWarningModel.dag_id == dag_id)
else:
readable_dags = get_readable_dags()
query = query.where(DagWarningModel.dag_id.in_(readable_dags))
if warning_type:
query = query.where(DagWarningModel.warning_type == warning_type)
total_entries = get_query_count(query, session=session)
query = apply_sorting(query=query, order_by=order_by, allowed_attrs=allowed_sort_attrs)
dag_warnings = session.scalars(query.offset(offset).limit(limit)).all()
return dag_warning_collection_schema.dump(
DagWarningCollection(dag_warnings=dag_warnings, total_entries=total_entries)
) |
Get a Dataset. | def get_dataset(*, uri: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a Dataset."""
dataset = session.scalar(
select(DatasetModel)
.where(DatasetModel.uri == uri)
.options(joinedload(DatasetModel.consuming_dags), joinedload(DatasetModel.producing_tasks))
)
if not dataset:
raise NotFound(
"Dataset not found",
detail=f"The Dataset with uri: `{uri}` was not found",
)
return dataset_schema.dump(dataset) |
Get datasets. | def get_datasets(
*,
limit: int,
offset: int = 0,
uri_pattern: str | None = None,
dag_ids: str | None = None,
order_by: str = "id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get datasets."""
allowed_attrs = ["id", "uri", "created_at", "updated_at"]
total_entries = session.scalars(select(func.count(DatasetModel.id))).one()
query = select(DatasetModel)
if dag_ids:
dags_list = dag_ids.split(",")
query = query.filter(
(DatasetModel.consuming_dags.any(DagScheduleDatasetReference.dag_id.in_(dags_list)))
| (DatasetModel.producing_tasks.any(TaskOutletDatasetReference.dag_id.in_(dags_list)))
)
if uri_pattern:
query = query.where(DatasetModel.uri.ilike(f"%{uri_pattern}%"))
query = apply_sorting(query, order_by, {}, allowed_attrs)
datasets = session.scalars(
query.options(subqueryload(DatasetModel.consuming_dags), subqueryload(DatasetModel.producing_tasks))
.offset(offset)
.limit(limit)
).all()
return dataset_collection_schema.dump(DatasetCollection(datasets=datasets, total_entries=total_entries)) |
Get dataset events. | def get_dataset_events(
*,
limit: int,
offset: int = 0,
order_by: str = "timestamp",
dataset_id: int | None = None,
source_dag_id: str | None = None,
source_task_id: str | None = None,
source_run_id: str | None = None,
source_map_index: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get dataset events."""
allowed_attrs = ["source_dag_id", "source_task_id", "source_run_id", "source_map_index", "timestamp"]
query = select(DatasetEvent)
if dataset_id:
query = query.where(DatasetEvent.dataset_id == dataset_id)
if source_dag_id:
query = query.where(DatasetEvent.source_dag_id == source_dag_id)
if source_task_id:
query = query.where(DatasetEvent.source_task_id == source_task_id)
if source_run_id:
query = query.where(DatasetEvent.source_run_id == source_run_id)
if source_map_index:
query = query.where(DatasetEvent.source_map_index == source_map_index)
query = query.options(subqueryload(DatasetEvent.created_dagruns))
total_entries = get_query_count(query, session=session)
query = apply_sorting(query, order_by, {}, allowed_attrs)
events = session.scalars(query.offset(offset).limit(limit)).all()
return dataset_event_collection_schema.dump(
DatasetEventCollection(dataset_events=events, total_entries=total_entries)
) |
Get DatasetDagRunQueue where clause. | def _generate_queued_event_where_clause(
*,
dag_id: str | None = None,
dataset_id: int | None = None,
uri: str | None = None,
before: str | None = None,
permitted_dag_ids: set[str] | None = None,
) -> list:
"""Get DatasetDagRunQueue where clause."""
where_clause = []
if dag_id is not None:
where_clause.append(DatasetDagRunQueue.target_dag_id == dag_id)
if dataset_id is not None:
where_clause.append(DatasetDagRunQueue.dataset_id == dataset_id)
if uri is not None:
where_clause.append(
DatasetDagRunQueue.dataset_id.in_(
select(DatasetModel.id).where(DatasetModel.uri == uri),
),
)
if before is not None:
where_clause.append(DatasetDagRunQueue.created_at < format_datetime(before))
if permitted_dag_ids is not None:
where_clause.append(DatasetDagRunQueue.target_dag_id.in_(permitted_dag_ids))
return where_clause |
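Illustrative use of the where-clause builder above, mirroring how the queued-event endpoints below consume it; the DAG id and dataset URI are placeholders:

from sqlalchemy import select
from airflow.models.dataset import DatasetDagRunQueue, DatasetModel

where_clause = _generate_queued_event_where_clause(
    dag_id="example_dag", uri="s3://bucket/key"
)
stmt = (
    select(DatasetDagRunQueue)
    .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)
    .where(*where_clause)
)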
Get a queued Dataset event for a DAG. | def get_dag_dataset_queued_event(
*, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get a queued Dataset event for a DAG."""
where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)
ddrq = session.scalar(
select(DatasetDagRunQueue)
.join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)
.where(*where_clause)
)
if ddrq is None:
raise NotFound(
"Queue event not found",
detail=f"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found",
)
queued_event = {"created_at": ddrq.created_at, "dag_id": dag_id, "uri": uri}
return queued_event_schema.dump(queued_event) |
Delete a queued Dataset event for a DAG. | def delete_dag_dataset_queued_event(
*, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Delete a queued Dataset event for a DAG."""
where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)
delete_stmt = (
delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session="fetch")
)
result = session.execute(delete_stmt)
if result.rowcount > 0:
return NoContent, HTTPStatus.NO_CONTENT
raise NotFound(
"Queue event not found",
detail=f"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found",
) |
Get queued Dataset events for a DAG. | def get_dag_dataset_queued_events(
*, dag_id: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get queued Dataset events for a DAG."""
where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)
query = (
select(DatasetDagRunQueue, DatasetModel.uri)
.join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)
.where(*where_clause)
)
result = session.execute(query).all()
total_entries = get_query_count(query, session=session)
if not result:
raise NotFound(
"Queue event not found",
detail=f"Queue event with dag_id: `{dag_id}` was not found",
)
queued_events = [
QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri) for ddrq, uri in result
]
return queued_event_collection_schema.dump(
QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)
) |
Delete queued Dataset events for a DAG. | def delete_dag_dataset_queued_events(
*, dag_id: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Delete queued Dataset events for a DAG."""
where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)
delete_stmt = delete(DatasetDagRunQueue).where(*where_clause)
result = session.execute(delete_stmt)
if result.rowcount > 0:
return NoContent, HTTPStatus.NO_CONTENT
raise NotFound(
"Queue event not found",
detail=f"Queue event with dag_id: `{dag_id}` was not found",
) |
Get queued Dataset events for a Dataset. | def get_dataset_queued_events(
*, uri: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Get queued Dataset events for a Dataset."""
permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"])
where_clause = _generate_queued_event_where_clause(
uri=uri, before=before, permitted_dag_ids=permitted_dag_ids
)
query = (
select(DatasetDagRunQueue, DatasetModel.uri)
.join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)
.where(*where_clause)
)
total_entries = get_query_count(query, session=session)
result = session.execute(query).all()
if total_entries > 0:
queued_events = [
QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri)
for ddrq, uri in result
]
return queued_event_collection_schema.dump(
QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)
)
raise NotFound(
"Queue event not found",
detail=f"Queue event with dataset uri: `{uri}` was not found",
) |
Delete queued Dataset events for a Dataset. | def delete_dataset_queued_events(
*, uri: str, before: str | None = None, session: Session = NEW_SESSION
) -> APIResponse:
"""Delete queued Dataset events for a Dataset."""
permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"])
where_clause = _generate_queued_event_where_clause(
uri=uri, before=before, permitted_dag_ids=permitted_dag_ids
)
delete_stmt = (
delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session="fetch")
)
result = session.execute(delete_stmt)
if result.rowcount > 0:
return NoContent, HTTPStatus.NO_CONTENT
raise NotFound(
"Queue event not found",
detail=f"Queue event with dataset uri: `{uri}` was not found",
) |
Create dataset event. | def create_dataset_event(session: Session = NEW_SESSION) -> APIResponse:
"""Create dataset event."""
body = get_json_request_dict()
try:
json_body = create_dataset_event_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err))
uri = json_body["dataset_uri"]
dataset = session.scalar(select(DatasetModel).where(DatasetModel.uri == uri).limit(1))
if not dataset:
raise NotFound(title="Dataset not found", detail=f"Dataset with uri: '{uri}' not found")
timestamp = timezone.utcnow()
extra = json_body.get("extra", {})
extra["from_rest_api"] = True
dataset_event = dataset_manager.register_dataset_change(
dataset=Dataset(uri),
timestamp=timestamp,
extra=extra,
session=session,
)
if not dataset_event:
raise NotFound(title="Dataset not found", detail=f"Dataset with uri: '{uri}' not found")
event = dataset_event_schema.dump(dataset_event)
return event |
Get a log entry. | def get_event_log(*, event_log_id: int, session: Session = NEW_SESSION) -> APIResponse:
"""Get a log entry."""
event_log = session.get(Log, event_log_id)
if event_log is None:
raise NotFound("Event Log not found")
return event_log_schema.dump(event_log) |
Get all log entries from event log. | def get_event_logs(
*,
dag_id: str | None = None,
task_id: str | None = None,
run_id: str | None = None,
owner: str | None = None,
event: str | None = None,
excluded_events: str | None = None,
included_events: str | None = None,
before: str | None = None,
after: str | None = None,
limit: int,
offset: int | None = None,
order_by: str = "event_log_id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all log entries from event log."""
to_replace = {"event_log_id": "id", "when": "dttm"}
allowed_sort_attrs = [
"event_log_id",
"when",
"dag_id",
"task_id",
"run_id",
"event",
"execution_date",
"owner",
"extra",
]
query = select(Log)
if dag_id:
query = query.where(Log.dag_id == dag_id)
if task_id:
query = query.where(Log.task_id == task_id)
if run_id:
query = query.where(Log.run_id == run_id)
if owner:
query = query.where(Log.owner == owner)
if event:
query = query.where(Log.event == event)
if included_events:
included_events_list = included_events.split(",")
query = query.where(Log.event.in_(included_events_list))
if excluded_events:
excluded_events_list = excluded_events.split(",")
query = query.where(Log.event.notin_(excluded_events_list))
if before:
query = query.where(Log.dttm < timezone.parse(before))
if after:
query = query.where(Log.dttm > timezone.parse(after))
total_entries = get_query_count(query, session=session)
query = apply_sorting(query, order_by, to_replace, allowed_sort_attrs)
event_logs = session.scalars(query.offset(offset).limit(limit)).all()
return event_log_collection_schema.dump(
EventLogCollection(event_logs=event_logs, total_entries=total_entries)
) |
Get extra links for task instance. | def get_extra_links(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get extra links for task instance."""
from airflow.models.taskinstance import TaskInstance
dagbag: DagBag = get_airflow_app().dag_bag
dag: DAG = dagbag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f'DAG with ID = "{dag_id}" not found')
try:
task = dag.get_task(task_id)
except TaskNotFound:
raise NotFound("Task not found", detail=f'Task with ID = "{task_id}" not found')
ti = session.scalar(
select(TaskInstance).where(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == dag_run_id,
TaskInstance.task_id == task_id,
)
)
if not ti:
raise NotFound("DAG Run not found", detail=f'DAG Run with ID = "{dag_run_id}" not found')
all_extra_link_pairs = (
(link_name, task.get_extra_links(ti, link_name)) for link_name in task.extra_links
)
all_extra_links = {link_name: link_url or None for link_name, link_url in sorted(all_extra_link_pairs)}
return all_extra_links |
Raise an HTTP error 400 if the auth manager is not FAB.
Intended to decorate endpoints that have been migrated from Airflow API to FAB API. | def _require_fab(func: Callable) -> Callable:
"""
Raise an HTTP error 400 if the auth manager is not FAB.
Intended to decorate endpoints that have been migrated from Airflow API to FAB API.
"""
def inner(*args, **kwargs):
from airflow.providers.fab.auth_manager.fab_auth_manager import FabAuthManager
auth_mgr = get_auth_manager()
if not isinstance(auth_mgr, FabAuthManager):
raise BadRequest(
detail="This endpoint is only available when using the default auth manager FabAuthManager."
)
else:
warnings.warn(
"This API endpoint is deprecated. "
"Please use the API under /auth/fab/v1 instead for this operation.",
DeprecationWarning,
stacklevel=1,  # This decorator is wrapped multiple times; better to point to this file
)
return func(*args, **kwargs)
return inner |
Get role. | def get_role(**kwargs) -> APIResponse:
"""Get role."""
return role_and_permission_endpoint.get_role(**kwargs) |
Get roles. | def get_roles(**kwargs) -> APIResponse:
"""Get roles."""
return role_and_permission_endpoint.get_roles(**kwargs) |
Delete a role. | def delete_role(**kwargs) -> APIResponse:
"""Delete a role."""
return role_and_permission_endpoint.delete_role(**kwargs) |
Update a role. | def patch_role(**kwargs) -> APIResponse:
"""Update a role."""
kwargs.pop("body", None)
return role_and_permission_endpoint.patch_role(**kwargs) |
Create a new role. | def post_role(**kwargs) -> APIResponse:
"""Create a new role."""
kwargs.pop("body", None)
return role_and_permission_endpoint.post_role(**kwargs) |
Get permissions. | def get_permissions(**kwargs) -> APIResponse:
"""Get permissions."""
return role_and_permission_endpoint.get_permissions(**kwargs) |
Get a user. | def get_user(**kwargs) -> APIResponse:
"""Get a user."""
return user_endpoint.get_user(**kwargs) |
Get users. | def get_users(**kwargs) -> APIResponse:
"""Get users."""
return user_endpoint.get_users(**kwargs) |
Create a new user. | def post_user(**kwargs) -> APIResponse:
"""Create a new user."""
kwargs.pop("body", None)
return user_endpoint.post_user(**kwargs) |
Update a user. | def patch_user(**kwargs) -> APIResponse:
"""Update a user."""
kwargs.pop("body", None)
return user_endpoint.patch_user(**kwargs) |
Delete a user. | def delete_user(**kwargs) -> APIResponse:
"""Delete a user."""
return user_endpoint.delete_user(**kwargs) |
Return the health of the airflow scheduler, metadatabase and triggerer. | def get_health() -> APIResponse:
"""Return the health of the airflow scheduler, metadatabase and triggerer."""
airflow_health_status = get_airflow_health()
return health_schema.dump(airflow_health_status) |
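A rough sketch of probing this endpoint from a client; the base URL is an assumption, and whether /health requires authentication depends on the deployment:

import requests

# Hypothetical local call against the health endpoint served above.
resp = requests.get("http://localhost:8080/api/v1/health", timeout=10)
resp.raise_for_status()
print(resp.json())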
Get an import error. | def get_import_error(*, import_error_id: int, session: Session = NEW_SESSION) -> APIResponse:
"""Get an import error."""
error = session.get(ParseImportError, import_error_id)
if error is None:
raise NotFound(
"Import error not found",
detail=f"The ImportError with import_error_id: `{import_error_id}` was not found",
)
session.expunge(error)
can_read_all_dags = get_auth_manager().is_authorized_dag(method="GET")
if not can_read_all_dags:
readable_dag_ids = security.get_readable_dags()
file_dag_ids = {
dag_id[0]
for dag_id in session.query(DagModel.dag_id).filter(DagModel.fileloc == error.filename).all()
}
# Can the user read any DAGs in the file?
if not readable_dag_ids.intersection(file_dag_ids):
raise PermissionDenied(detail="You do not have read permission on any of the DAGs in the file")
# Check if user has read access to all the DAGs defined in the file
if not file_dag_ids.issubset(readable_dag_ids):
error.stacktrace = "REDACTED - you do not have read permission on all DAGs in the file"
return import_error_schema.dump(error) |
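The redaction above reduces to two set checks; a worked example with made-up DAG IDs:

# Hypothetical DAG IDs illustrating the permission logic above.
readable_dag_ids = {"dag_a"}
file_dag_ids = {"dag_a", "dag_b"}
assert readable_dag_ids.intersection(file_dag_ids)  # at least one readable DAG -> no PermissionDenied
assert not file_dag_ids.issubset(readable_dag_ids)  # but not all readable -> stacktrace is redacted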
Get all import errors. | def get_import_errors(
*,
limit: int,
offset: int | None = None,
order_by: str = "import_error_id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all import errors."""
to_replace = {"import_error_id": "id"}
allowed_sort_attrs = ["import_error_id", "timestamp", "filename"]
count_query = select(func.count(ParseImportError.id))
query = select(ParseImportError)
query = apply_sorting(query, order_by, to_replace, allowed_sort_attrs)
can_read_all_dags = get_auth_manager().is_authorized_dag(method="GET")
if not can_read_all_dags:
# if the user doesn't have access to all DAGs, only display errors from visible DAGs
readable_dag_ids = security.get_readable_dags()
dagfiles_stmt = select(DagModel.fileloc).distinct().where(DagModel.dag_id.in_(readable_dag_ids))
query = query.where(ParseImportError.filename.in_(dagfiles_stmt))
count_query = count_query.where(ParseImportError.filename.in_(dagfiles_stmt))
total_entries = session.scalars(count_query).one()
import_errors = session.scalars(query.offset(offset).limit(limit)).all()
if not can_read_all_dags:
for import_error in import_errors:
# Check if user has read access to all the DAGs defined in the file
file_dag_ids = (
session.query(DagModel.dag_id).filter(DagModel.fileloc == import_error.filename).all()
)
requests: Sequence[IsAuthorizedDagRequest] = [
{
"method": "GET",
"details": DagDetails(id=dag_id[0]),
}
for dag_id in file_dag_ids
]
if not get_auth_manager().batch_is_authorized_dag(requests):
session.expunge(import_error)
import_error.stacktrace = "REDACTED - you do not have read permission on all DAGs in the file"
return import_error_collection_schema.dump(
ImportErrorCollection(import_errors=import_errors, total_entries=total_entries)
) |
Get logs for specific task instance. | def get_log(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
task_try_number: int,
full_content: bool = False,
map_index: int = -1,
token: str | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get logs for specific task instance."""
key = get_airflow_app().config["SECRET_KEY"]
if not token:
metadata = {}
else:
try:
metadata = URLSafeSerializer(key).loads(token)
except BadSignature:
raise BadRequest("Bad Signature. Please use only the tokens provided by the API.")
if metadata.get("download_logs"):
full_content = True
metadata["download_logs"] = full_content
task_log_reader = TaskLogReader()
if not task_log_reader.supports_read:
raise BadRequest("Task log handler does not support read logs.")
query = (
select(TaskInstance)
.where(
TaskInstance.task_id == task_id,
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == dag_run_id,
TaskInstance.map_index == map_index,
)
.join(TaskInstance.dag_run)
.options(joinedload(TaskInstance.trigger).joinedload(Trigger.triggerer_job))
)
ti = session.scalar(query)
if ti is None:
metadata["end_of_log"] = True
raise NotFound(title="TaskInstance not found")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if dag:
try:
ti.task = dag.get_task(ti.task_id)
except TaskNotFound:
pass
return_type = request.accept_mimetypes.best_match(["text/plain", "application/json"])
# return_type is one of the two mimetypes above, or None
logs: Any
if return_type == "application/json" or return_type is None: # default
logs, metadata = task_log_reader.read_log_chunks(ti, task_try_number, metadata)
logs = logs[0] if task_try_number is not None else logs
# we must have token here, so we can safely ignore it
token = URLSafeSerializer(key).dumps(metadata) # type: ignore[assignment]
return logs_schema.dump(LogResponseObject(continuation_token=token, content=logs))
# text/plain. Stream
logs = task_log_reader.read_log_stream(ti, task_try_number, metadata)
return Response(logs, headers={"Content-Type": return_type}) |
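A client can page through the JSON variant by echoing continuation_token back on each call; a rough sketch, assuming a local webserver, basic-auth credentials, and the content/continuation_token field names produced by logs_schema above:

import requests

# Hypothetical pagination loop over the JSON log endpoint; the URL, credentials
# and stop condition are simplifications for this sketch.
url = (
    "http://localhost:8080/api/v1/dags/example_dag/dagRuns/run_1"
    "/taskInstances/extract/logs/1"
)
token = None
for _ in range(100):  # hard cap so the sketch always terminates
    params = {"token": token} if token else {}
    data = requests.get(
        url,
        params=params,
        auth=("admin", "admin"),
        headers={"Accept": "application/json"},
        timeout=30,
    ).json()
    if not data["content"]:
        break
    print(data["content"])
    token = data["continuation_token"]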
Get plugins endpoint. | def get_plugins(*, limit: int, offset: int = 0) -> APIResponse:
"""Get plugins endpoint."""
plugins_info = get_plugin_info()
collection = PluginCollection(plugins=plugins_info[offset:][:limit], total_entries=len(plugins_info))
return plugin_collection_schema.dump(collection) |
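The double slice above is just an offset/limit window; a quick equivalence check on illustrative data:

# Offset/limit slicing as used above.
plugins_info = ["p1", "p2", "p3", "p4", "p5"]
offset, limit = 1, 2
assert plugins_info[offset:][:limit] == plugins_info[offset:offset + limit] == ["p2", "p3"]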
Delete a pool. | def delete_pool(*, pool_name: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a pool."""
if pool_name == "default_pool":
raise BadRequest(detail="Default Pool can't be deleted")
affected_count = session.execute(delete(Pool).where(Pool.pool == pool_name)).rowcount
if affected_count == 0:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
return Response(status=HTTPStatus.NO_CONTENT) |
Get a pool. | def get_pool(*, pool_name: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a pool."""
obj = session.scalar(select(Pool).where(Pool.pool == pool_name))
if obj is None:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
return pool_schema.dump(obj) |
Get all pools. | def get_pools(
*,
limit: int,
order_by: str = "id",
offset: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all pools."""
to_replace = {"name": "pool"}
allowed_sort_attrs = ["name", "slots", "id"]
total_entries = session.scalars(func.count(Pool.id)).one()
query = select(Pool)
query = apply_sorting(query, order_by, to_replace, allowed_sort_attrs)
pools = session.scalars(query.offset(offset).limit(limit)).all()
return pool_collection_schema.dump(PoolCollection(pools=pools, total_entries=total_entries)) |
Update a pool. | def patch_pool(
*,
pool_name: str,
update_mask: UpdateMask = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Update a pool."""
request_dict = get_json_request_dict()
# Only slots and include_deferred can be modified in 'default_pool'
if pool_name == Pool.DEFAULT_POOL_NAME and request_dict.get("name", None) != Pool.DEFAULT_POOL_NAME:
if update_mask and all(mask.strip() in {"slots", "include_deferred"} for mask in update_mask):
pass
else:
raise BadRequest(detail="Default Pool's name can't be modified")
pool = session.scalar(select(Pool).where(Pool.pool == pool_name).limit(1))
if not pool:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
try:
patch_body = pool_schema.load(request_dict)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
update_mask = [i.strip() for i in update_mask]
_patch_body = {}
try:
# MyPy infers a List[Optional[str]] type here but it should be a List[str]
# there is no way field is None here (UpdateMask is a List[str])
# so if pool_schema.declared_fields[field].attribute is None, the field itself is returned
update_mask = [
pool_schema.declared_fields[field].attribute # type: ignore[misc]
if pool_schema.declared_fields[field].attribute
else field
for field in update_mask
]
except KeyError as err:
raise BadRequest(detail=f"Invalid field: {err.args[0]} in update mask")
_patch_body = {field: patch_body[field] for field in update_mask}
patch_body = _patch_body
else:
required_fields = {"name", "slots"}
fields_diff = required_fields.difference(request_dict)
if fields_diff:
raise BadRequest(detail=f"Missing required property(ies): {sorted(fields_diff)}")
for key, value in patch_body.items():
setattr(pool, key, value)
session.commit()
return pool_schema.dump(pool) |
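To make the update_mask handling concrete, a small sketch with a hypothetical loaded body; only the masked fields survive into the final patch (the schema maps the API field "name" to the model attribute "pool", hence the key below):

# Hypothetical loaded body and mask, mirroring the filtering above.
patch_body = {"pool": "backfill_pool", "slots": 10, "include_deferred": True}
update_mask = ["slots", "include_deferred"]
patch_body = {field: patch_body[field] for field in update_mask}
assert patch_body == {"slots": 10, "include_deferred": True}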
Create a pool. | def post_pool(*, session: Session = NEW_SESSION) -> APIResponse:
"""Create a pool."""
required_fields = {"name", "slots"} # Pool would require both fields in the post request
fields_diff = required_fields.difference(get_json_request_dict())
if fields_diff:
raise BadRequest(detail=f"Missing required property(ies): {sorted(fields_diff)}")
try:
post_body = pool_schema.load(get_json_request_dict(), session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
pool = Pool(**post_body)
try:
session.add(pool)
session.commit()
return pool_schema.dump(pool)
except IntegrityError:
raise AlreadyExists(detail=f"Pool: {post_body['pool']} already exists") |
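A sketch of a payload that satisfies the required-fields check above; "name" and "slots" are the API-level field names:

# Hypothetical request body for pool creation.
example_body = {"name": "backfill_pool", "slots": 16}
required_fields = {"name", "slots"}
assert not required_fields.difference(example_body)  # nothing missing -> request accepted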
Get providers. | def get_providers() -> APIResponse:
"""Get providers."""
providers = [_provider_mapper(d) for d in ProvidersManager().providers.values()]
total_entries = len(providers)
return provider_collection_schema.dump(
ProviderCollection(providers=providers, total_entries=total_entries)
) |
Return the request body parsed as a JSON dictionary. | def get_json_request_dict() -> Mapping[str, Any]:
"""Return the request body parsed as a JSON dictionary."""
from flask import request
return cast(Mapping[str, Any], request.get_json()) |
Get simplified representation of a task. | def get_task(*, dag_id: str, task_id: str) -> APIResponse:
"""Get simplified representation of a task."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found")
try:
task = dag.get_task(task_id=task_id)
except TaskNotFound:
raise NotFound("Task not found")
return task_schema.dump(task) |
Get tasks for DAG. | def get_tasks(*, dag_id: str, order_by: str = "task_id") -> APIResponse:
"""Get tasks for DAG."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found")
tasks = dag.tasks
try:
tasks = sorted(tasks, key=attrgetter(order_by.lstrip("-")), reverse=(order_by[0:1] == "-"))
except AttributeError as err:
raise BadRequest(detail=str(err))
task_collection = TaskCollection(tasks=tasks, total_entries=len(tasks))
return task_collection_schema.dump(task_collection) |
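The order_by handling above relies on a leading "-" to flip the direction; a self-contained sketch with stand-in task objects:

from operator import attrgetter
from types import SimpleNamespace

# Stand-in tasks; a leading '-' on order_by reverses the sort, as in the endpoint above.
tasks = [SimpleNamespace(task_id="b"), SimpleNamespace(task_id="a")]
order_by = "-task_id"
ordered = sorted(tasks, key=attrgetter(order_by.lstrip("-")), reverse=(order_by[0:1] == "-"))
assert [t.task_id for t in ordered] == ["b", "a"]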
Get task instance. | def get_task_instance(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get task instance."""
query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id)
.join(TI.dag_run)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == DR.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
try:
task_instance = session.execute(query).one_or_none()
except MultipleResultsFound:
raise NotFound(
"Task instance not found", detail="Task instance is mapped, add the map_index value to the URL"
)
if task_instance is None:
raise NotFound("Task instance not found")
if task_instance[0].map_index != -1:
raise NotFound(
"Task instance not found", detail="Task instance is mapped, add the map_index value to the URL"
)
return task_instance_schema.dump(task_instance) |
Get a mapped task instance. | def get_mapped_task_instance(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
map_index: int,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get task instance."""
query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id, TI.map_index == map_index)
.join(TI.dag_run)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == DR.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
task_instance = session.execute(query).one_or_none()
if task_instance is None:
raise NotFound("Task instance not found")
return task_instance_schema.dump(task_instance) |
Get list of mapped task instances. | def get_mapped_task_instances(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
duration_gte: float | None = None,
duration_lte: float | None = None,
state: list[str] | None = None,
pool: list[str] | None = None,
queue: list[str] | None = None,
limit: int | None = None,
offset: int | None = None,
order_by: str | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get list of task instances."""
# The state filter may contain the literal string 'none', which maps to a NULL state
states = _convert_ti_states(state)
base_query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id, TI.map_index >= 0)
.join(TI.dag_run)
)
# 0 can mean a mapped TI that expanded to an empty list, so it is not an automatic 404
unfiltered_total_count = get_query_count(base_query, session=session)
if unfiltered_total_count == 0:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
error_message = f"DAG {dag_id} not found"
raise NotFound(error_message)
try:
task = dag.get_task(task_id)
except TaskNotFound:
error_message = f"Task id {task_id} not found"
raise NotFound(error_message)
if not needs_expansion(task):
error_message = f"Task id {task_id} is not mapped"
raise NotFound(error_message)
# Other search criteria
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(execution_date_gte, execution_date_lte),
)
base_query = _apply_range_filter(
base_query, key=TI.start_date, value_range=(start_date_gte, start_date_lte)
)
base_query = _apply_range_filter(base_query, key=TI.end_date, value_range=(end_date_gte, end_date_lte))
base_query = _apply_range_filter(base_query, key=TI.duration, value_range=(duration_gte, duration_lte))
base_query = _apply_range_filter(
base_query, key=TI.updated_at, value_range=(updated_at_gte, updated_at_lte)
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=pool)
base_query = _apply_array_filter(base_query, key=TI.queue, values=queue)
# Count elements before joining extra columns
total_entries = get_query_count(base_query, session=session)
# Add SLA miss
entry_query = (
base_query.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
if order_by is None:
entry_query = entry_query.order_by(TI.map_index.asc())
elif order_by == "state":
entry_query = entry_query.order_by(TI.state.asc(), TI.map_index.asc())
elif order_by == "-state":
entry_query = entry_query.order_by(TI.state.desc(), TI.map_index.asc())
elif order_by == "duration":
entry_query = entry_query.order_by(TI.duration.asc(), TI.map_index.asc())
elif order_by == "-duration":
entry_query = entry_query.order_by(TI.duration.desc(), TI.map_index.asc())
elif order_by == "start_date":
entry_query = entry_query.order_by(TI.start_date.asc(), TI.map_index.asc())
elif order_by == "-start_date":
entry_query = entry_query.order_by(TI.start_date.desc(), TI.map_index.asc())
elif order_by == "end_date":
entry_query = entry_query.order_by(TI.end_date.asc(), TI.map_index.asc())
elif order_by == "-end_date":
entry_query = entry_query.order_by(TI.end_date.desc(), TI.map_index.asc())
elif order_by == "-map_index":
entry_query = entry_query.order_by(TI.map_index.desc())
else:
raise BadRequest(detail=f"Ordering with '{order_by}' is not supported")
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(entry_query.offset(offset).limit(limit)).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
) |
Get list of task instances. | def get_task_instances(
*,
limit: int,
dag_id: str | None = None,
dag_run_id: str | None = None,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
duration_gte: float | None = None,
duration_lte: float | None = None,
state: list[str] | None = None,
pool: list[str] | None = None,
queue: list[str] | None = None,
offset: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get list of task instances."""
# The state filter may contain the literal string 'none', which maps to a NULL state
states = _convert_ti_states(state)
base_query = select(TI).join(TI.dag_run)
if dag_id != "~":
base_query = base_query.where(TI.dag_id == dag_id)
else:
base_query = base_query.where(TI.dag_id.in_(get_readable_dags()))
if dag_run_id != "~":
base_query = base_query.where(TI.run_id == dag_run_id)
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(execution_date_gte, execution_date_lte),
)
base_query = _apply_range_filter(
base_query, key=TI.start_date, value_range=(start_date_gte, start_date_lte)
)
base_query = _apply_range_filter(base_query, key=TI.end_date, value_range=(end_date_gte, end_date_lte))
base_query = _apply_range_filter(base_query, key=TI.duration, value_range=(duration_gte, duration_lte))
base_query = _apply_range_filter(
base_query, key=TI.updated_at, value_range=(updated_at_gte, updated_at_lte)
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=pool)
base_query = _apply_array_filter(base_query, key=TI.queue, values=queue)
# Count elements before joining extra columns
total_entries = get_query_count(base_query, session=session)
# Add join
entry_query = (
base_query.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
.offset(offset)
.limit(limit)
)
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(entry_query).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
) |
Get list of task instances. | def get_task_instances_batch(session: Session = NEW_SESSION) -> APIResponse:
"""Get list of task instances."""
body = get_json_request_dict()
try:
data = task_instance_batch_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
dag_ids = data["dag_ids"]
if dag_ids:
requests: Sequence[IsAuthorizedDagRequest] = [
{
"method": "GET",
"details": DagDetails(id=id),
}
for id in dag_ids
]
if not get_auth_manager().batch_is_authorized_dag(requests):
raise PermissionDenied(detail=f"User not allowed to access some of these DAGs: {list(dag_ids)}")
else:
dag_ids = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
states = _convert_ti_states(data["state"])
base_query = select(TI).join(TI.dag_run)
base_query = _apply_array_filter(base_query, key=TI.dag_id, values=dag_ids)
base_query = _apply_array_filter(base_query, key=TI.run_id, values=data["dag_run_ids"])
base_query = _apply_array_filter(base_query, key=TI.task_id, values=data["task_ids"])
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(data["execution_date_gte"], data["execution_date_lte"]),
)
base_query = _apply_range_filter(
base_query,
key=TI.start_date,
value_range=(data["start_date_gte"], data["start_date_lte"]),
)
base_query = _apply_range_filter(
base_query, key=TI.end_date, value_range=(data["end_date_gte"], data["end_date_lte"])
)
base_query = _apply_range_filter(
base_query, key=TI.duration, value_range=(data["duration_gte"], data["duration_lte"])
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=data["pool"])
base_query = _apply_array_filter(base_query, key=TI.queue, values=data["queue"])
# Count elements before joining extra columns
total_entries = get_query_count(base_query, session=session)
# Add join
base_query = base_query.join(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
isouter=True,
).add_columns(SlaMiss)
ti_query = base_query.options(joinedload(TI.rendered_task_instance_fields))
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(ti_query).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
) |
Clear task instances. | def post_clear_task_instances(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Clear task instances."""
body = get_json_request_dict()
try:
data = clear_task_instance_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
error_message = f"Dag id {dag_id} not found"
raise NotFound(error_message)
reset_dag_runs = data.pop("reset_dag_runs")
dry_run = data.pop("dry_run")
# dag.clear() below is always called with dry_run=True first, otherwise it would try to confirm on the terminal!
dag_run_id = data.pop("dag_run_id", None)
future = data.pop("include_future", False)
past = data.pop("include_past", False)
downstream = data.pop("include_downstream", False)
upstream = data.pop("include_upstream", False)
if dag_run_id is not None:
dag_run: DR | None = session.scalar(select(DR).where(DR.dag_id == dag_id, DR.run_id == dag_run_id))
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
data["start_date"] = dag_run.logical_date
data["end_date"] = dag_run.logical_date
if past:
data["start_date"] = None
if future:
data["end_date"] = None
task_ids = data.pop("task_ids", None)
if task_ids is not None:
task_id = [task[0] if isinstance(task, tuple) else task for task in task_ids]
dag = dag.partial_subset(
task_ids_or_regex=task_id,
include_downstream=downstream,
include_upstream=upstream,
)
if len(dag.task_dict) > 1:
# If we had upstream/downstream etc then also include those!
task_ids.extend(tid for tid in dag.task_dict if tid != task_id)
task_instances = dag.clear(dry_run=True, dag_bag=get_airflow_app().dag_bag, task_ids=task_ids, **data)
if not dry_run:
clear_task_instances(
task_instances,
session,
dag=dag,
dag_run_state=DagRunState.QUEUED if reset_dag_runs else False,
)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances)
) |
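For reference, a sketch of a request body the handler above accepts; the keys follow the pops in the code, while full validation is delegated to clear_task_instance_form:

# Hypothetical payload for clearing task instances.
example_body = {
    "dry_run": True,             # only report the affected task instances
    "reset_dag_runs": True,      # re-queue affected dag runs when actually clearing
    "dag_run_id": "run_1",       # limit clearing to a single run's logical date
    "include_past": False,
    "include_future": False,
    "include_upstream": False,
    "include_downstream": False,
    "task_ids": ["extract", "load"],
}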