repo: string (2-99 chars) | file: string (13-225 chars) | code: string (0-18.3M chars) | file_length: int64 (0-18.3M) | avg_line_length: float64 (0-1.36M) | max_line_length: int64 (0-4.26M) | extension_type: string (1 class)
---|---|---|---|---|---|---|
airflow | airflow-main/airflow/api_connexion/endpoints/task_instance_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Iterable, TypeVar
from marshmallow import ValidationError
from sqlalchemy import and_, func, or_, select
from sqlalchemy.exc import MultipleResultsFound
from sqlalchemy.orm import Session, joinedload
from sqlalchemy.sql import ClauseElement, Select
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.request_dict import get_json_request_dict
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.parameters import format_datetime, format_parameters
from airflow.api_connexion.schemas.task_instance_schema import (
TaskInstanceCollection,
TaskInstanceReferenceCollection,
clear_task_instance_form,
set_single_task_instance_state_form,
set_task_instance_note_form_schema,
set_task_instance_state_form,
task_instance_batch_form,
task_instance_collection_schema,
task_instance_reference_collection_schema,
task_instance_reference_schema,
task_instance_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models import SlaMiss
from airflow.models.dagrun import DagRun as DR
from airflow.models.operator import needs_expansion
from airflow.models.taskinstance import TaskInstance as TI, clear_task_instances
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState, TaskInstanceState
T = TypeVar("T")
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_task_instance(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get task instance."""
query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id)
.join(TI.dag_run)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == DR.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
try:
task_instance = session.execute(query).one_or_none()
except MultipleResultsFound:
raise NotFound(
"Task instance not found", detail="Task instance is mapped, add the map_index value to the URL"
)
if task_instance is None:
raise NotFound("Task instance not found")
if task_instance[0].map_index != -1:
raise NotFound(
"Task instance not found", detail="Task instance is mapped, add the map_index value to the URL"
)
return task_instance_schema.dump(task_instance)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_mapped_task_instance(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
map_index: int,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get task instance."""
query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id, TI.map_index == map_index)
.join(TI.dag_run)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == DR.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
task_instance = session.execute(query).one_or_none()
if task_instance is None:
raise NotFound("Task instance not found")
return task_instance_schema.dump(task_instance)
@format_parameters(
{
"execution_date_gte": format_datetime,
"execution_date_lte": format_datetime,
"start_date_gte": format_datetime,
"start_date_lte": format_datetime,
"end_date_gte": format_datetime,
"end_date_lte": format_datetime,
"updated_at_gte": format_datetime,
"updated_at_lte": format_datetime,
},
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_mapped_task_instances(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
duration_gte: float | None = None,
duration_lte: float | None = None,
state: list[str] | None = None,
pool: list[str] | None = None,
queue: list[str] | None = None,
limit: int | None = None,
offset: int | None = None,
order_by: str | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get list of task instances."""
# Because state can be 'none'
states = _convert_ti_states(state)
base_query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id, TI.map_index >= 0)
.join(TI.dag_run)
)
# 0 can mean a mapped TI that expanded to an empty list, so it is not an automatic 404
unfiltered_total_count = session.execute(select(func.count("*")).select_from(base_query)).scalar()
if unfiltered_total_count == 0:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
error_message = f"DAG {dag_id} not found"
raise NotFound(error_message)
task = dag.get_task(task_id)
if not task:
error_message = f"Task id {task_id} not found"
raise NotFound(error_message)
if not needs_expansion(task):
error_message = f"Task id {task_id} is not mapped"
raise NotFound(error_message)
# Other search criteria
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(execution_date_gte, execution_date_lte),
)
base_query = _apply_range_filter(
base_query, key=TI.start_date, value_range=(start_date_gte, start_date_lte)
)
base_query = _apply_range_filter(base_query, key=TI.end_date, value_range=(end_date_gte, end_date_lte))
base_query = _apply_range_filter(base_query, key=TI.duration, value_range=(duration_gte, duration_lte))
base_query = _apply_range_filter(
base_query, key=TI.updated_at, value_range=(updated_at_gte, updated_at_lte)
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=pool)
base_query = _apply_array_filter(base_query, key=TI.queue, values=queue)
# Count elements before joining extra columns
total_entries = session.execute(select(func.count("*")).select_from(base_query)).scalar()
# Add SLA miss
entry_query = (
base_query.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
if order_by:
if order_by == "state":
entry_query = entry_query.order_by(TI.state.asc(), TI.map_index.asc())
elif order_by == "-state":
entry_query = entry_query.order_by(TI.state.desc(), TI.map_index.asc())
elif order_by == "-map_index":
entry_query = entry_query.order_by(TI.map_index.desc())
else:
raise BadRequest(detail=f"Ordering with '{order_by}' is not supported")
else:
entry_query = entry_query.order_by(TI.map_index.asc())
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(entry_query.offset(offset).limit(limit)).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
)
def _convert_ti_states(states: Iterable[str] | None) -> list[TaskInstanceState | None] | None:
if not states:
return None
return [None if s == "none" else TaskInstanceState(s) for s in states]
def _apply_array_filter(query: Select, key: ClauseElement, values: Iterable[Any] | None) -> Select:
if values is not None:
cond = ((key == v) for v in values)
query = query.where(or_(*cond))
return query
def _apply_range_filter(query: Select, key: ClauseElement, value_range: tuple[T, T]) -> Select:
gte_value, lte_value = value_range
if gte_value is not None:
query = query.where(key >= gte_value)
if lte_value is not None:
query = query.where(key <= lte_value)
return query
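# --- Illustrative sketch (not part of the upstream module) --------------------
# Shows how the three helpers above compose onto a single SQLAlchemy select().
# The filter values below are hypothetical examples.
def _example_compose_filters() -> Select:
    states = _convert_ti_states(["none", "success"])  # -> [None, TaskInstanceState.SUCCESS]
    query = select(TI).join(TI.dag_run)
    query = _apply_range_filter(query, key=TI.start_date, value_range=("2023-01-01", "2023-01-31"))
    query = _apply_array_filter(query, key=TI.state, values=states)
    query = _apply_array_filter(query, key=TI.pool, values=["default_pool"])
    return query  # session.scalars(query) would yield only the matching TaskInstance rows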
@format_parameters(
{
"execution_date_gte": format_datetime,
"execution_date_lte": format_datetime,
"start_date_gte": format_datetime,
"start_date_lte": format_datetime,
"end_date_gte": format_datetime,
"end_date_lte": format_datetime,
"updated_at_gte": format_datetime,
"updated_at_lte": format_datetime,
},
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_task_instances(
*,
limit: int,
dag_id: str | None = None,
dag_run_id: str | None = None,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
duration_gte: float | None = None,
duration_lte: float | None = None,
state: list[str] | None = None,
pool: list[str] | None = None,
queue: list[str] | None = None,
offset: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get list of task instances."""
# Because state can be 'none'
states = _convert_ti_states(state)
base_query = select(TI).join(TI.dag_run)
if dag_id != "~":
base_query = base_query.where(TI.dag_id == dag_id)
if dag_run_id != "~":
base_query = base_query.where(TI.run_id == dag_run_id)
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(execution_date_gte, execution_date_lte),
)
base_query = _apply_range_filter(
base_query, key=TI.start_date, value_range=(start_date_gte, start_date_lte)
)
base_query = _apply_range_filter(base_query, key=TI.end_date, value_range=(end_date_gte, end_date_lte))
base_query = _apply_range_filter(base_query, key=TI.duration, value_range=(duration_gte, duration_lte))
base_query = _apply_range_filter(
base_query, key=TI.updated_at, value_range=(updated_at_gte, updated_at_lte)
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=pool)
base_query = _apply_array_filter(base_query, key=TI.queue, values=queue)
# Count elements before joining extra columns
count_query = select(func.count("*")).select_from(base_query)
total_entries = session.execute(count_query).scalar()
# Add join
entry_query = (
base_query.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
.offset(offset)
.limit(limit)
)
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(entry_query).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_task_instances_batch(session: Session = NEW_SESSION) -> APIResponse:
"""Get list of task instances."""
body = get_json_request_dict()
try:
data = task_instance_batch_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
states = _convert_ti_states(data["state"])
base_query = select(TI).join(TI.dag_run)
base_query = _apply_array_filter(base_query, key=TI.dag_id, values=data["dag_ids"])
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(data["execution_date_gte"], data["execution_date_lte"]),
)
base_query = _apply_range_filter(
base_query,
key=TI.start_date,
value_range=(data["start_date_gte"], data["start_date_lte"]),
)
base_query = _apply_range_filter(
base_query, key=TI.end_date, value_range=(data["end_date_gte"], data["end_date_lte"])
)
base_query = _apply_range_filter(
base_query, key=TI.duration, value_range=(data["duration_gte"], data["duration_lte"])
)
base_query = _apply_array_filter(base_query, key=TI.state, values=states)
base_query = _apply_array_filter(base_query, key=TI.pool, values=data["pool"])
base_query = _apply_array_filter(base_query, key=TI.queue, values=data["queue"])
# Count elements before joining extra columns
total_entries = session.execute(select(func.count("*")).select_from(base_query)).scalar()
# Add join
base_query = base_query.join(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == DR.execution_date,
),
isouter=True,
).add_columns(SlaMiss)
ti_query = base_query.options(joinedload(TI.rendered_task_instance_fields))
# using execute because we want the SlaMiss entity. Scalars don't return None for missing entities
task_instances = session.execute(ti_query).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
)
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def post_clear_task_instances(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Clear task instances."""
body = get_json_request_dict()
try:
data = clear_task_instance_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
error_message = f"Dag id {dag_id} not found"
raise NotFound(error_message)
reset_dag_runs = data.pop("reset_dag_runs")
dry_run = data.pop("dry_run")
# We always pass dry_run here, otherwise this would try to confirm on the terminal!
dag_run_id = data.pop("dag_run_id", None)
future = data.pop("include_future", False)
past = data.pop("include_past", False)
downstream = data.pop("include_downstream", False)
upstream = data.pop("include_upstream", False)
if dag_run_id is not None:
dag_run: DR | None = session.scalar(select(DR).where(DR.dag_id == dag_id, DR.run_id == dag_run_id))
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
data["start_date"] = dag_run.logical_date
data["end_date"] = dag_run.logical_date
if past:
data["start_date"] = None
if future:
data["end_date"] = None
task_ids = data.pop("task_ids", None)
if task_ids is not None:
task_id = [task[0] if isinstance(task, tuple) else task for task in task_ids]
dag = dag.partial_subset(
task_ids_or_regex=task_id,
include_downstream=downstream,
include_upstream=upstream,
)
if len(dag.task_dict) > 1:
# If we had upstream/downstream etc then also include those!
task_ids.extend(tid for tid in dag.task_dict if tid != task_id)
task_instances = dag.clear(dry_run=True, dag_bag=get_airflow_app().dag_bag, task_ids=task_ids, **data)
if not dry_run:
clear_task_instances(
task_instances,
session,
dag=dag,
dag_run_state=DagRunState.QUEUED if reset_dag_runs else False,
)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances)
)
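# --- Illustrative sketch (not part of the upstream module) --------------------
# A request body accepted by post_clear_task_instances, with keys taken from the
# data.pop(...) calls above. All values are hypothetical.
_EXAMPLE_CLEAR_BODY = {
    "dry_run": True,
    "reset_dag_runs": False,
    "dag_run_id": "manual__2023-01-01T00:00:00+00:00",
    "include_future": False,
    "include_past": False,
    "include_downstream": True,
    "include_upstream": False,
    "task_ids": ["extract", "load"],
}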
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def post_set_task_instances_state(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set a state of task instances."""
body = get_json_request_dict()
try:
data = set_task_instance_state_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
error_message = f"Dag ID {dag_id} not found"
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound(error_message)
task_id = data["task_id"]
task = dag.task_dict.get(task_id)
if not task:
error_message = f"Task ID {task_id} not found"
raise NotFound(error_message)
execution_date = data.get("execution_date")
run_id = data.get("dag_run_id")
if (
execution_date
and (
session.scalars(
select(TI).where(
TI.task_id == task_id, TI.dag_id == dag_id, TI.execution_date == execution_date
)
).one_or_none()
)
is None
):
raise NotFound(
detail=f"Task instance not found for task {task_id!r} on execution_date {execution_date}"
)
if run_id and not session.get(
TI, {"task_id": task_id, "dag_id": dag_id, "run_id": run_id, "map_index": -1}
):
error_message = f"Task instance not found for task {task_id!r} on DAG run with ID {run_id!r}"
raise NotFound(detail=error_message)
tis = dag.set_task_instance_state(
task_id=task_id,
run_id=run_id,
execution_date=execution_date,
state=data["new_state"],
upstream=data["include_upstream"],
downstream=data["include_downstream"],
future=data["include_future"],
past=data["include_past"],
commit=not data["dry_run"],
session=session,
)
return task_instance_reference_collection_schema.dump(TaskInstanceReferenceCollection(task_instances=tis))
def set_mapped_task_instance_note(
*, dag_id: str, dag_run_id: str, task_id: str, map_index: int
) -> APIResponse:
"""Set the note for a Mapped Task instance."""
return set_task_instance_note(dag_id=dag_id, dag_run_id=dag_run_id, task_id=task_id, map_index=map_index)
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def patch_task_instance(
*, dag_id: str, dag_run_id: str, task_id: str, map_index: int = -1, session: Session = NEW_SESSION
) -> APIResponse:
"""Update the state of a task instance."""
body = get_json_request_dict()
try:
data = set_single_task_instance_state_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f"DAG {dag_id!r} not found")
if not dag.has_task(task_id):
raise NotFound("Task not found", detail=f"Task {task_id!r} not found in DAG {dag_id!r}")
ti: TI | None = session.get(
TI, {"task_id": task_id, "dag_id": dag_id, "run_id": dag_run_id, "map_index": map_index}
)
if not ti:
error_message = f"Task instance not found for task {task_id!r} on DAG run with ID {dag_run_id!r}"
raise NotFound(detail=error_message)
if not data["dry_run"]:
ti = dag.set_task_instance_state(
task_id=task_id,
run_id=dag_run_id,
map_indexes=[map_index],
state=data["new_state"],
commit=True,
session=session,
)
return task_instance_reference_schema.dump(ti)
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def patch_mapped_task_instance(
*, dag_id: str, dag_run_id: str, task_id: str, map_index: int, session: Session = NEW_SESSION
) -> APIResponse:
"""Update the state of a mapped task instance."""
return patch_task_instance(
dag_id=dag_id, dag_run_id=dag_run_id, task_id=task_id, map_index=map_index, session=session
)
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def set_task_instance_note(
*, dag_id: str, dag_run_id: str, task_id: str, map_index: int = -1, session: Session = NEW_SESSION
) -> APIResponse:
"""Set the note for a Task instance. This supports both Mapped and non-Mapped Task instances."""
try:
post_body = set_task_instance_note_form_schema.load(get_json_request_dict())
new_note = post_body["note"]
except ValidationError as err:
raise BadRequest(detail=str(err))
query = (
select(TI)
.where(TI.dag_id == dag_id, TI.run_id == dag_run_id, TI.task_id == task_id)
.join(TI.dag_run)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == DR.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_columns(SlaMiss)
.options(joinedload(TI.rendered_task_instance_fields))
)
if map_index == -1:
        query = query.where(or_(TI.map_index == -1, TI.map_index.is_(None)))
else:
query = query.where(TI.map_index == map_index)
try:
result = session.execute(query).one_or_none()
except MultipleResultsFound:
raise NotFound(
"Task instance not found", detail="Task instance is mapped, add the map_index value to the URL"
)
if result is None:
error_message = f"Task Instance not found for dag_id={dag_id}, run_id={dag_run_id}, task_id={task_id}"
raise NotFound(error_message)
ti, sla_miss = result
from flask_login import current_user
current_user_id = getattr(current_user, "id", None)
if ti.task_instance_note is None:
ti.note = (new_note, current_user_id)
else:
ti.task_instance_note.content = new_note
ti.task_instance_note.user_id = current_user_id
session.commit()
return task_instance_schema.dump((ti, sla_miss))
| 25,747 | 35.521986 | 110 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/health_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.api.common.airflow_health import get_airflow_health
from airflow.api_connexion.schemas.health_schema import health_schema
from airflow.api_connexion.types import APIResponse
def get_health() -> APIResponse:
"""Return the health of the airflow scheduler, metadatabase and triggerer."""
airflow_health_status = get_airflow_health()
return health_schema.dump(airflow_health_status)
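# Illustrative note (not part of the upstream module): per the docstring above,
# the dumped payload reports the scheduler, metadatabase and triggerer. The exact
# field layout is defined by health_schema; a shape along the lines of
# {"metadatabase": {"status": ...}, "scheduler": {...}, "triggerer": {...}} is an
# assumption here, not a guarantee.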
| 1,227 | 42.857143 | 81 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/__init__.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/config_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from flask import Response, request
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound, PermissionDenied
from airflow.api_connexion.schemas.config_schema import Config, ConfigOption, ConfigSection, config_schema
from airflow.configuration import conf
from airflow.security import permissions
from airflow.settings import json
LINE_SEP = "\n" # `\n` cannot appear in f-strings
def _conf_dict_to_config(conf_dict: dict) -> Config:
"""Convert config dict to a Config object."""
config = Config(
sections=[
ConfigSection(
name=section, options=[ConfigOption(key=key, value=value) for key, value in options.items()]
)
for section, options in conf_dict.items()
]
)
return config
def _option_to_text(config_option: ConfigOption) -> str:
"""Convert a single config option to text."""
return f"{config_option.key} = {config_option.value}"
def _section_to_text(config_section: ConfigSection) -> str:
"""Convert a single config section to text."""
return (
f"[{config_section.name}]{LINE_SEP}"
f"{LINE_SEP.join(_option_to_text(option) for option in config_section.options)}{LINE_SEP}"
)
def _config_to_text(config: Config) -> str:
"""Convert the entire config to text."""
return LINE_SEP.join(_section_to_text(s) for s in config.sections)
def _config_to_json(config: Config) -> str:
"""Convert a Config object to a JSON formatted string."""
return json.dumps(config_schema.dump(config), indent=4)
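# --- Illustrative sketch (not part of the upstream module) --------------------
# Building a tiny Config by hand and rendering it with the helpers above. The
# section and option shown ("core"/"parallelism") are just example names.
def _example_render_config() -> str:
    config = Config(
        sections=[ConfigSection(name="core", options=[ConfigOption(key="parallelism", value="32")])]
    )
    return _config_to_text(config)  # -> "[core]\nparallelism = 32\n"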
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG)])
def get_config(*, section: str | None = None) -> Response:
"""Get current configuration."""
serializer = {
"text/plain": _config_to_text,
"application/json": _config_to_json,
}
return_type = request.accept_mimetypes.best_match(serializer.keys())
if conf.get("webserver", "expose_config").lower() == "non-sensitive-only":
expose_config = True
display_sensitive = False
else:
expose_config = conf.getboolean("webserver", "expose_config")
display_sensitive = True
if return_type not in serializer:
return Response(status=HTTPStatus.NOT_ACCEPTABLE)
elif expose_config:
if section and not conf.has_section(section):
raise NotFound("section not found.", detail=f"section={section} not found.")
conf_dict = conf.as_dict(display_source=False, display_sensitive=display_sensitive)
if section:
conf_section_value = conf_dict[section]
conf_dict.clear()
conf_dict[section] = conf_section_value
config = _conf_dict_to_config(conf_dict)
config_text = serializer[return_type](config)
return Response(config_text, headers={"Content-Type": return_type})
else:
raise PermissionDenied(
detail=(
"Your Airflow administrator chose not to expose the configuration, most likely for security"
" reasons."
)
)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG)])
def get_value(section: str, option: str) -> Response:
serializer = {
"text/plain": _config_to_text,
"application/json": _config_to_json,
}
return_type = request.accept_mimetypes.best_match(serializer.keys())
if conf.get("webserver", "expose_config").lower() == "non-sensitive-only":
expose_config = True
else:
expose_config = conf.getboolean("webserver", "expose_config")
if return_type not in serializer:
return Response(status=HTTPStatus.NOT_ACCEPTABLE)
elif expose_config:
if not conf.has_option(section, option):
raise NotFound(
"Config not found.", detail=f"The option [{section}/{option}] is not found in config."
)
if (section, option) in conf.sensitive_config_values:
value = "< hidden >"
else:
value = conf.get(section, option)
config = Config(
sections=[ConfigSection(name=section, options=[ConfigOption(key=option, value=value)])]
)
config_text = serializer[return_type](config)
return Response(config_text, headers={"Content-Type": return_type})
else:
raise PermissionDenied(
detail=(
"Your Airflow administrator chose not to expose the configuration, most likely for security"
" reasons."
)
)
| 5,486 | 37.104167 | 108 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/extra_link_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import select
from sqlalchemy.orm.session import Session
from airflow import DAG
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound
from airflow.api_connexion.types import APIResponse
from airflow.exceptions import TaskNotFound
from airflow.models.dagbag import DagBag
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_extra_links(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get extra links for task instance."""
from airflow.models.taskinstance import TaskInstance
dagbag: DagBag = get_airflow_app().dag_bag
dag: DAG = dagbag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f'DAG with ID = "{dag_id}" not found')
try:
task = dag.get_task(task_id)
except TaskNotFound:
raise NotFound("Task not found", detail=f'Task with ID = "{task_id}" not found')
ti = session.scalar(
select(TaskInstance).where(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == dag_run_id,
TaskInstance.task_id == task_id,
)
)
if not ti:
raise NotFound("DAG Run not found", detail=f'DAG Run with ID = "{dag_run_id}" not found')
all_extra_link_pairs = (
(link_name, task.get_extra_links(ti, link_name)) for link_name in task.extra_links
)
all_extra_links = {
link_name: link_url if link_url else None for link_name, link_url in sorted(all_extra_link_pairs)
}
return all_extra_links
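# Illustrative note (not part of the upstream module): the endpoint returns a plain
# dict mapping each declared extra link name to its resolved URL, or None when the
# operator produced no URL for that link, e.g.
# {"Example Link": "https://example.com/run/1", "Broken Link": None}.
# The link names and URL here are hypothetical.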
| 2,793 | 34.367089 | 105 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/connection_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
from http import HTTPStatus
from connexion import NoContent
from flask import Response, request
from marshmallow import ValidationError
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.update_mask import extract_update_mask_data
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.connection_schema import (
ConnectionCollection,
connection_collection_schema,
connection_schema,
connection_test_schema,
)
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.configuration import conf
from airflow.models import Connection
from airflow.secrets.environment_variables import CONN_ENV_PREFIX
from airflow.security import permissions
from airflow.utils import helpers
from airflow.utils.log.action_logger import action_event_from_permission
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.strings import get_random_string
from airflow.www.decorators import action_logging
RESOURCE_EVENT_PREFIX = "connection"
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION)])
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_DELETE,
),
)
def delete_connection(*, connection_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a connection entry."""
connection = session.scalar(select(Connection).filter_by(conn_id=connection_id))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
session.delete(connection)
return NoContent, HTTPStatus.NO_CONTENT
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION)])
@provide_session
def get_connection(*, connection_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a connection entry."""
connection = session.scalar(select(Connection).where(Connection.conn_id == connection_id))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
return connection_schema.dump(connection)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION)])
@format_parameters({"limit": check_limit})
@provide_session
def get_connections(
*,
limit: int,
offset: int = 0,
order_by: str = "id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all connection entries."""
to_replace = {"connection_id": "conn_id"}
allowed_filter_attrs = ["connection_id", "conn_type", "description", "host", "port", "id"]
total_entries = session.execute(select(func.count(Connection.id))).scalar_one()
query = select(Connection)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
connections = session.scalars(query.offset(offset).limit(limit)).all()
return connection_collection_schema.dump(
ConnectionCollection(connections=connections, total_entries=total_entries)
)
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION)])
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_EDIT,
),
)
def patch_connection(
*,
connection_id: str,
update_mask: UpdateMask = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Update a connection entry."""
try:
data = connection_schema.load(request.json, partial=True)
except ValidationError as err:
        # If validation gets to here, it is extra field validation.
raise BadRequest(detail=str(err.messages))
non_update_fields = ["connection_id", "conn_id"]
connection = session.scalar(select(Connection).filter_by(conn_id=connection_id).limit(1))
if connection is None:
raise NotFound(
"Connection not found",
detail=f"The Connection with connection_id: `{connection_id}` was not found",
)
if data.get("conn_id") and connection.conn_id != data["conn_id"]:
raise BadRequest(detail="The connection_id cannot be updated.")
if update_mask:
data = extract_update_mask_data(update_mask, non_update_fields, data)
for key in data:
setattr(connection, key, data[key])
session.add(connection)
session.commit()
return connection_schema.dump(connection)
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION)])
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_CREATE,
),
)
def post_connection(*, session: Session = NEW_SESSION) -> APIResponse:
"""Create connection entry."""
body = request.json
try:
data = connection_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
conn_id = data["conn_id"]
try:
helpers.validate_key(conn_id, max_length=200)
except Exception as e:
raise BadRequest(detail=str(e))
connection = session.scalar(select(Connection).filter_by(conn_id=conn_id).limit(1))
if not connection:
connection = Connection(**data)
session.add(connection)
session.commit()
return connection_schema.dump(connection)
    raise AlreadyExists(detail=f"Connection already exists. ID: {conn_id}")
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION)])
def test_connection() -> APIResponse:
"""
Test an API connection.
    This method first creates an in-memory transient conn_id and exports it to an
    env var, as some hook classes try to find the connection from their __init__ method and error out
    if it is not found. It also deletes the conn_id env variable after the test.
"""
if conf.get("core", "test_connection", fallback="Disabled").lower().strip() != "enabled":
return Response(
"Testing connections is disabled in Airflow configuration. Contact your deployment admin to "
"enable it.",
403,
)
body = request.json
transient_conn_id = get_random_string()
conn_env_var = f"{CONN_ENV_PREFIX}{transient_conn_id.upper()}"
try:
data = connection_schema.load(body)
data["conn_id"] = transient_conn_id
conn = Connection(**data)
os.environ[conn_env_var] = conn.get_uri()
status, message = conn.test_connection()
return connection_test_schema.dump({"status": status, "message": message})
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
finally:
if conn_env_var in os.environ:
del os.environ[conn_env_var]
| 8,034 | 38.004854 | 105 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/dag_run_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
import pendulum
from connexion import NoContent
from flask import g
from flask_login import current_user
from marshmallow import ValidationError
from sqlalchemy import delete, func, or_, select
from sqlalchemy.orm import Session
from sqlalchemy.sql import Select
from airflow.api.common.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_queued,
set_dag_run_state_to_success,
)
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.request_dict import get_json_request_dict
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters
from airflow.api_connexion.schemas.dag_run_schema import (
DAGRunCollection,
clear_dagrun_form_schema,
dagrun_collection_schema,
dagrun_schema,
dagruns_batch_form_schema,
set_dagrun_note_form_schema,
set_dagrun_state_form_schema,
)
from airflow.api_connexion.schemas.dataset_schema import (
DatasetEventCollection,
dataset_event_collection_schema,
)
from airflow.api_connexion.schemas.task_instance_schema import (
TaskInstanceReferenceCollection,
task_instance_reference_collection_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models import DagModel, DagRun
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.log.action_logger import action_event_from_permission
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
from airflow.www.decorators import action_logging
RESOURCE_EVENT_PREFIX = "dag_run"
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def delete_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a DAG Run."""
deleted_count = session.execute(
delete(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
).rowcount
if deleted_count == 0:
raise NotFound(detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found")
return NoContent, HTTPStatus.NO_CONTENT
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def get_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a DAG Run."""
dag_run = session.scalar(select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id))
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
],
)
@provide_session
def get_upstream_dataset_events(
*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION
) -> APIResponse:
"""If dag run is dataset-triggered, return the dataset events that triggered it."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(
DagRun.dag_id == dag_id,
DagRun.run_id == dag_run_id,
)
)
if dag_run is None:
raise NotFound(
"DAGRun not found",
detail=f"DAGRun with DAG ID: '{dag_id}' and DagRun ID: '{dag_run_id}' not found",
)
events = dag_run.consumed_dataset_events
return dataset_event_collection_schema.dump(
DatasetEventCollection(dataset_events=events, total_entries=len(events))
)
def _fetch_dag_runs(
query: Select,
*,
end_date_gte: str | None,
end_date_lte: str | None,
execution_date_gte: str | None,
execution_date_lte: str | None,
start_date_gte: str | None,
start_date_lte: str | None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
limit: int | None,
offset: int | None,
order_by: str,
session: Session,
) -> tuple[list[DagRun], int]:
if start_date_gte:
query = query.where(DagRun.start_date >= start_date_gte)
if start_date_lte:
query = query.where(DagRun.start_date <= start_date_lte)
# filter execution date
if execution_date_gte:
query = query.where(DagRun.execution_date >= execution_date_gte)
if execution_date_lte:
query = query.where(DagRun.execution_date <= execution_date_lte)
# filter end date
if end_date_gte:
query = query.where(DagRun.end_date >= end_date_gte)
if end_date_lte:
query = query.where(DagRun.end_date <= end_date_lte)
# filter updated at
if updated_at_gte:
query = query.where(DagRun.updated_at >= updated_at_gte)
if updated_at_lte:
query = query.where(DagRun.updated_at <= updated_at_lte)
total_entries = session.scalar(select(func.count()).select_from(query))
to_replace = {"dag_run_id": "run_id"}
allowed_filter_attrs = [
"id",
"state",
"dag_id",
"execution_date",
"dag_run_id",
"start_date",
"end_date",
"updated_at",
"external_trigger",
"conf",
]
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
return session.scalars(query.offset(offset).limit(limit)).all(), total_entries
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@format_parameters(
{
"start_date_gte": format_datetime,
"start_date_lte": format_datetime,
"execution_date_gte": format_datetime,
"execution_date_lte": format_datetime,
"end_date_gte": format_datetime,
"end_date_lte": format_datetime,
"updated_at_gte": format_datetime,
"updated_at_lte": format_datetime,
"limit": check_limit,
}
)
@provide_session
def get_dag_runs(
*,
dag_id: str,
start_date_gte: str | None = None,
start_date_lte: str | None = None,
execution_date_gte: str | None = None,
execution_date_lte: str | None = None,
end_date_gte: str | None = None,
end_date_lte: str | None = None,
updated_at_gte: str | None = None,
updated_at_lte: str | None = None,
state: list[str] | None = None,
offset: int | None = None,
limit: int | None = None,
order_by: str = "id",
session: Session = NEW_SESSION,
):
"""Get all DAG Runs."""
query = select(DagRun)
# This endpoint allows specifying ~ as the dag_id to retrieve DAG Runs for all DAGs.
if dag_id == "~":
appbuilder = get_airflow_app().appbuilder
query = query.where(DagRun.dag_id.in_(appbuilder.sm.get_readable_dag_ids(g.user)))
else:
query = query.where(DagRun.dag_id == dag_id)
if state:
query = query.where(DagRun.state.in_(state))
dag_run, total_entries = _fetch_dag_runs(
query,
end_date_gte=end_date_gte,
end_date_lte=end_date_lte,
execution_date_gte=execution_date_gte,
execution_date_lte=execution_date_lte,
start_date_gte=start_date_gte,
start_date_lte=start_date_lte,
updated_at_gte=updated_at_gte,
updated_at_lte=updated_at_lte,
limit=limit,
offset=offset,
order_by=order_by,
session=session,
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_run, total_entries=total_entries))
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def get_dag_runs_batch(*, session: Session = NEW_SESSION) -> APIResponse:
"""Get list of DAG Runs."""
body = get_json_request_dict()
try:
data = dagruns_batch_form_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
appbuilder = get_airflow_app().appbuilder
readable_dag_ids = appbuilder.sm.get_readable_dag_ids(g.user)
query = select(DagRun)
if data.get("dag_ids"):
dag_ids = set(data["dag_ids"]) & set(readable_dag_ids)
query = query.where(DagRun.dag_id.in_(dag_ids))
else:
query = query.where(DagRun.dag_id.in_(readable_dag_ids))
states = data.get("states")
if states:
query = query.where(DagRun.state.in_(states))
dag_runs, total_entries = _fetch_dag_runs(
query,
end_date_gte=data["end_date_gte"],
end_date_lte=data["end_date_lte"],
execution_date_gte=data["execution_date_gte"],
execution_date_lte=data["execution_date_lte"],
start_date_gte=data["start_date_gte"],
start_date_lte=data["start_date_lte"],
limit=data["page_limit"],
offset=data["page_offset"],
order_by=data.get("order_by", "id"),
session=session,
)
return dagrun_collection_schema.dump(DAGRunCollection(dag_runs=dag_runs, total_entries=total_entries))
@security.requires_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_CREATE,
),
)
def post_dag_run(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Trigger a DAG."""
dm = session.scalar(select(DagModel).where(DagModel.is_active, DagModel.dag_id == dag_id).limit(1))
if not dm:
raise NotFound(title="DAG not found", detail=f"DAG with dag_id: '{dag_id}' not found")
if dm.has_import_errors:
raise BadRequest(
title="DAG cannot be triggered",
detail=f"DAG with dag_id: '{dag_id}' has import errors",
)
try:
post_body = dagrun_schema.load(get_json_request_dict(), session=session)
except ValidationError as err:
raise BadRequest(detail=str(err))
logical_date = pendulum.instance(post_body["execution_date"])
run_id = post_body["run_id"]
dagrun_instance = session.scalar(
select(DagRun)
.where(
DagRun.dag_id == dag_id,
or_(DagRun.run_id == run_id, DagRun.execution_date == logical_date),
)
.limit(1)
)
if not dagrun_instance:
try:
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dag_run = dag.create_dagrun(
run_type=DagRunType.MANUAL,
run_id=run_id,
execution_date=logical_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=logical_date),
state=DagRunState.QUEUED,
conf=post_body.get("conf"),
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
session=session,
)
dag_run_note = post_body.get("note")
if dag_run_note:
current_user_id = getattr(current_user, "id", None)
dag_run.note = (dag_run_note, current_user_id)
return dagrun_schema.dump(dag_run)
except ValueError as ve:
raise BadRequest(detail=str(ve))
if dagrun_instance.execution_date == logical_date:
raise AlreadyExists(
detail=(
f"DAGRun with DAG ID: '{dag_id}' and "
f"DAGRun logical date: '{logical_date.isoformat(sep=' ')}' already exists"
),
)
raise AlreadyExists(detail=f"DAGRun with DAG ID: '{dag_id}' and DAGRun ID: '{run_id}' already exists")
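# --- Illustrative sketch (not part of the upstream module) --------------------
# Keys as they appear in post_body after dagrun_schema.load(...) above
# (execution_date, run_id, conf, note). All values are hypothetical.
_EXAMPLE_TRIGGER_BODY = {
    "run_id": "manual__2023-01-01T00:00:00+00:00",
    "execution_date": "2023-01-01T00:00:00+00:00",
    "conf": {"param": "value"},
    "note": "triggered via the REST API",
}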
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def update_dag_run_state(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set a state of a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_state_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
state = post_body["state"]
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if state == DagRunState.SUCCESS:
set_dag_run_state_to_success(dag=dag, run_id=dag_run.run_id, commit=True)
elif state == DagRunState.QUEUED:
set_dag_run_state_to_queued(dag=dag, run_id=dag_run.run_id, commit=True)
else:
set_dag_run_state_to_failed(dag=dag, run_id=dag_run.run_id, commit=True)
dag_run = session.get(DagRun, dag_run.id)
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def clear_dag_run(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Clear a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = clear_dagrun_form_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest(detail=str(err))
dry_run = post_body.get("dry_run", False)
dag = get_airflow_app().dag_bag.get_dag(dag_id)
start_date = dag_run.logical_date
end_date = dag_run.logical_date
if dry_run:
task_instances = dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
dry_run=True,
)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances)
)
else:
dag.clear(
start_date=start_date,
end_date=end_date,
task_ids=None,
include_subdags=True,
include_parentdag=True,
only_failed=False,
)
dag_run = session.execute(select(DagRun).where(DagRun.id == dag_run.id)).scalar_one()
return dagrun_schema.dump(dag_run)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
],
)
@provide_session
def set_dag_run_note(*, dag_id: str, dag_run_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Set the note for a dag run."""
dag_run: DagRun | None = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == dag_run_id)
)
if dag_run is None:
error_message = f"Dag Run id {dag_run_id} not found in dag {dag_id}"
raise NotFound(error_message)
try:
post_body = set_dagrun_note_form_schema.load(get_json_request_dict())
new_note = post_body["note"]
except ValidationError as err:
raise BadRequest(detail=str(err))
current_user_id = getattr(current_user, "id", None)
if dag_run.dag_run_note is None:
dag_run.note = (new_note, current_user_id)
else:
dag_run.dag_run_note.content = new_note
dag_run.dag_run_note.user_id = current_user_id
session.commit()
return dagrun_schema.dump(dag_run)
| 17,161 | 34.532091 | 107 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/dag_source_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from flask import Response, current_app, request
from itsdangerous import BadSignature, URLSafeSerializer
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound
from airflow.api_connexion.schemas.dag_source_schema import dag_source_schema
from airflow.models.dagcode import DagCode
from airflow.security import permissions
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE)])
def get_dag_source(*, file_token: str) -> Response:
"""Get source code using file token."""
secret_key = current_app.config["SECRET_KEY"]
auth_s = URLSafeSerializer(secret_key)
try:
path = auth_s.loads(file_token)
dag_source = DagCode.code(path)
except (BadSignature, FileNotFoundError):
raise NotFound("Dag source not found")
return_type = request.accept_mimetypes.best_match(["text/plain", "application/json"])
if return_type == "text/plain":
return Response(dag_source, headers={"Content-Type": return_type})
if return_type == "application/json":
content = dag_source_schema.dumps(dict(content=dag_source))
return Response(content, headers={"Content-Type": return_type})
return Response("Not Allowed Accept Header", status=HTTPStatus.NOT_ACCEPTABLE)
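# --- Illustrative sketch (not part of the upstream module) --------------------
# The inverse of auth_s.loads(file_token) above: a file token can be minted by
# signing the relative fileloc with the same SECRET_KEY. The key and path here
# are hypothetical.
def _example_make_file_token(path: str = "dags/example_dag.py", secret_key: str = "replace-me") -> str:
    return URLSafeSerializer(secret_key).dumps(path)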
| 2,146 | 42.816327 | 89 | py |
airflow | airflow-main/airflow/api_connexion/endpoints/version_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
import airflow
from airflow.api_connexion.schemas.version_schema import version_info_schema
from airflow.api_connexion.types import APIResponse
from airflow.utils.platform import get_airflow_git_version
class VersionInfo(NamedTuple):
"""Version information."""
version: str
git_version: str | None
def get_version() -> APIResponse:
"""Get version information."""
airflow_version = airflow.__version__
git_version = get_airflow_git_version()
version_info = VersionInfo(version=airflow_version, git_version=git_version)
return version_info_schema.dump(version_info)
| 1,454 | 34.487805 | 80 |
py
|
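For completeness, a tiny client call against the version handler above; the host and credentials are placeholders.
import requests

info = requests.get("http://localhost:8080/api/v1/version", auth=("admin", "admin")).json()
print(info["version"], info.get("git_version"))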
airflow
|
airflow-main/airflow/api_connexion/endpoints/request_dict.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Mapping, cast
def get_json_request_dict() -> Mapping[str, Any]:
"""Cast request dictionary to JSON."""
from flask import request
return cast(Mapping[str, Any], request.get_json())
| 1,040 | 37.555556 | 62 |
py
|
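A small sketch of what get_json_request_dict returns; the throwaway Flask app is only a stand-in for the webserver so the helper can be exercised inside a request context.
from flask import Flask

from airflow.api_connexion.endpoints.request_dict import get_json_request_dict

app = Flask(__name__)
with app.test_request_context(json={"note": "hello"}):
    assert get_json_request_dict() == {"note": "hello"}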
airflow
|
airflow-main/airflow/api_connexion/endpoints/pool_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from flask import Response
from marshmallow import ValidationError
from sqlalchemy import delete, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.request_dict import get_json_request_dict
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.pool_schema import PoolCollection, pool_collection_schema, pool_schema
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.models.pool import Pool
from airflow.security import permissions
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL)])
@provide_session
def delete_pool(*, pool_name: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete a pool."""
if pool_name == "default_pool":
raise BadRequest(detail="Default Pool can't be deleted")
affected_count = session.execute(delete(Pool).where(Pool.pool == pool_name)).rowcount
if affected_count == 0:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
return Response(status=HTTPStatus.NO_CONTENT)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL)])
@provide_session
def get_pool(*, pool_name: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a pool."""
obj = session.scalar(select(Pool).where(Pool.pool == pool_name))
if obj is None:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
return pool_schema.dump(obj)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL)])
@format_parameters({"limit": check_limit})
@provide_session
def get_pools(
*,
limit: int,
order_by: str = "id",
offset: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all pools."""
to_replace = {"name": "pool"}
allowed_filter_attrs = ["name", "slots", "id"]
total_entries = session.scalars(func.count(Pool.id)).one()
query = select(Pool)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
pools = session.scalars(query.offset(offset).limit(limit)).all()
return pool_collection_schema.dump(PoolCollection(pools=pools, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL)])
@provide_session
def patch_pool(
*,
pool_name: str,
update_mask: UpdateMask = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Update a pool."""
request_dict = get_json_request_dict()
# Only slots can be modified in 'default_pool'
try:
if pool_name == Pool.DEFAULT_POOL_NAME and request_dict["name"] != Pool.DEFAULT_POOL_NAME:
if update_mask and len(update_mask) == 1 and update_mask[0].strip() == "slots":
pass
else:
raise BadRequest(detail="Default Pool's name can't be modified")
except KeyError:
pass
pool = session.scalar(select(Pool).where(Pool.pool == pool_name).limit(1))
if not pool:
raise NotFound(detail=f"Pool with name:'{pool_name}' not found")
try:
patch_body = pool_schema.load(request_dict)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
update_mask = [i.strip() for i in update_mask]
_patch_body = {}
try:
update_mask = [
pool_schema.declared_fields[field].attribute
if pool_schema.declared_fields[field].attribute
else field
for field in update_mask
]
except KeyError as err:
raise BadRequest(detail=f"Invalid field: {err.args[0]} in update mask")
_patch_body = {field: patch_body[field] for field in update_mask}
patch_body = _patch_body
else:
required_fields = {"name", "slots"}
fields_diff = required_fields - set(get_json_request_dict().keys())
if fields_diff:
raise BadRequest(detail=f"Missing required property(ies): {sorted(fields_diff)}")
for key, value in patch_body.items():
setattr(pool, key, value)
session.commit()
return pool_schema.dump(pool)
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL)])
@provide_session
def post_pool(*, session: Session = NEW_SESSION) -> APIResponse:
"""Create a pool."""
    required_fields = {"name", "slots"}  # Pool requires both fields in the POST request
fields_diff = required_fields - set(get_json_request_dict().keys())
if fields_diff:
raise BadRequest(detail=f"Missing required property(ies): {sorted(fields_diff)}")
try:
post_body = pool_schema.load(get_json_request_dict(), session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
pool = Pool(**post_body)
try:
session.add(pool)
session.commit()
return pool_schema.dump(pool)
except IntegrityError:
raise AlreadyExists(detail=f"Pool: {post_body['pool']} already exists")
| 6,207 | 38.291139 | 105 |
py
|
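A client-side sketch for the pool endpoints above; the host, credentials and pool name are placeholders. The POST body carries both required fields, and the PATCH passes update_mask so patch_pool only applies the slots change.
import requests

BASE = "http://localhost:8080/api/v1"
AUTH = ("admin", "admin")  # assumed basic-auth backend

# Create a pool -- post_pool() requires both "name" and "slots".
requests.post(f"{BASE}/pools", json={"name": "backfill", "slots": 4}, auth=AUTH).raise_for_status()

# Resize it -- update_mask restricts which fields patch_pool() applies.
requests.patch(
    f"{BASE}/pools/backfill",
    params={"update_mask": "slots"},
    json={"name": "backfill", "slots": 8},
    auth=AUTH,
).raise_for_status()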
airflow
|
airflow-main/airflow/decorators/base.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import inspect
import warnings
from functools import cached_property
from itertools import chain
from textwrap import dedent
from typing import (
Any,
Callable,
ClassVar,
Collection,
Dict,
Generic,
Iterator,
Mapping,
Sequence,
TypeVar,
cast,
overload,
)
import attr
import re2
import typing_extensions
from sqlalchemy.orm import Session
from airflow import Dataset
from airflow.exceptions import AirflowException
from airflow.models.abstractoperator import DEFAULT_RETRIES, DEFAULT_RETRY_DELAY
from airflow.models.baseoperator import (
BaseOperator,
coerce_resources,
coerce_timedelta,
get_merged_defaults,
parse_retries,
)
from airflow.models.dag import DAG, DagContext
from airflow.models.expandinput import (
EXPAND_INPUT_EMPTY,
DictOfListsExpandInput,
ExpandInput,
ListOfDictsExpandInput,
OperatorExpandArgument,
OperatorExpandKwargsArgument,
is_mappable,
)
from airflow.models.mappedoperator import MappedOperator, ValidationSource, ensure_xcomarg_return_value
from airflow.models.pool import Pool
from airflow.models.xcom_arg import XComArg
from airflow.typing_compat import ParamSpec, Protocol
from airflow.utils import timezone
from airflow.utils.context import KNOWN_CONTEXT_KEYS, Context
from airflow.utils.decorators import remove_task_decorator
from airflow.utils.helpers import prevent_duplicates
from airflow.utils.task_group import TaskGroup, TaskGroupContext
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET
class ExpandableFactory(Protocol):
"""Protocol providing inspection against wrapped function.
This is used in ``validate_expand_kwargs`` and implemented by function
decorators like ``@task`` and ``@task_group``.
:meta private:
"""
function: Callable
@cached_property
def function_signature(self) -> inspect.Signature:
return inspect.signature(self.function)
@cached_property
def _mappable_function_argument_names(self) -> set[str]:
"""Arguments that can be mapped against."""
return set(self.function_signature.parameters)
def _validate_arg_names(self, func: ValidationSource, kwargs: dict[str, Any]) -> None:
"""Ensure that all arguments passed to operator-mapping functions are accounted for."""
parameters = self.function_signature.parameters
if any(v.kind == inspect.Parameter.VAR_KEYWORD for v in parameters.values()):
return
kwargs_left = kwargs.copy()
for arg_name in self._mappable_function_argument_names:
value = kwargs_left.pop(arg_name, NOTSET)
if func != "expand" or value is NOTSET or is_mappable(value):
continue
tname = type(value).__name__
raise ValueError(f"expand() got an unexpected type {tname!r} for keyword argument {arg_name!r}")
if len(kwargs_left) == 1:
raise TypeError(f"{func}() got an unexpected keyword argument {next(iter(kwargs_left))!r}")
elif kwargs_left:
names = ", ".join(repr(n) for n in kwargs_left)
raise TypeError(f"{func}() got unexpected keyword arguments {names}")
def get_unique_task_id(
task_id: str,
dag: DAG | None = None,
task_group: TaskGroup | None = None,
) -> str:
"""
Generate unique task id given a DAG (or if run in a DAG context).
IDs are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag:
return task_id
# We need to check if we are in the context of TaskGroup as the task_id may
# already be altered
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
tg_task_id = task_group.child_id(task_id) if task_group else task_id
if tg_task_id not in dag.task_ids:
return task_id
def _find_id_suffixes(dag: DAG) -> Iterator[int]:
prefix = re2.split(r"__\d+$", tg_task_id)[0]
for task_id in dag.task_ids:
match = re2.match(rf"^{prefix}__(\d+)$", task_id)
if match is None:
continue
yield int(match.group(1))
yield 0 # Default if there's no matching task ID.
core = re2.split(r"__\d+$", task_id)[0]
return f"{core}__{max(_find_id_suffixes(dag)) + 1}"
class DecoratedOperator(BaseOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
:param kwargs_to_upstream: For certain operators, we might need to upstream certain arguments
that would otherwise be absorbed by the DecoratedOperator (for example python_callable for the
PythonOperator). This gives a user the option to upstream kwargs as needed.
"""
template_fields: Sequence[str] = ("op_args", "op_kwargs")
template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}
# since we won't mutate the arguments, we should just do the shallow copy
    # there are some cases we can't deepcopy the objects (e.g. protobuf).
shallow_copy_attrs: Sequence[str] = ("python_callable",)
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
multiple_outputs: bool = False,
kwargs_to_upstream: dict[str, Any] | None = None,
**kwargs,
) -> None:
task_id = get_unique_task_id(task_id, kwargs.get("dag"), kwargs.get("task_group"))
self.python_callable = python_callable
kwargs_to_upstream = kwargs_to_upstream or {}
op_args = op_args or []
op_kwargs = op_kwargs or {}
        # Check that arguments can be bound. There's a slight difference when
# we do validation for task-mapping: Since there's no guarantee we can
# receive enough arguments at parse time, we use bind_partial to simply
# check all the arguments we know are valid. Whether these are enough
# can only be known at execution time, when unmapping happens, and this
# is called without the _airflow_mapped_validation_only flag.
if kwargs.get("_airflow_mapped_validation_only"):
inspect.signature(python_callable).bind_partial(*op_args, **op_kwargs)
else:
inspect.signature(python_callable).bind(*op_args, **op_kwargs)
self.multiple_outputs = multiple_outputs
self.op_args = op_args
self.op_kwargs = op_kwargs
super().__init__(task_id=task_id, **kwargs_to_upstream, **kwargs)
def execute(self, context: Context):
# todo make this more generic (move to prepare_lineage) so it deals with non taskflow operators
# as well
for arg in chain(self.op_args, self.op_kwargs.values()):
if isinstance(arg, Dataset):
self.inlets.append(arg)
return_value = super().execute(context)
return self._handle_output(return_value=return_value, context=context, xcom_push=self.xcom_push)
def _handle_output(self, return_value: Any, context: Context, xcom_push: Callable):
"""
Handles logic for whether a decorator needs to push a single return value or multiple return values.
        It sets outlets if any datasets are found in the returned value(s).
:param return_value:
:param context:
:param xcom_push:
"""
if isinstance(return_value, Dataset):
self.outlets.append(return_value)
if isinstance(return_value, list):
for item in return_value:
if isinstance(item, Dataset):
self.outlets.append(item)
if not self.multiple_outputs or return_value is None:
return return_value
if isinstance(return_value, dict):
for key in return_value.keys():
if not isinstance(key, str):
raise AirflowException(
"Returned dictionary keys must be strings when using "
f"multiple_outputs, found {key} ({type(key)}) instead"
)
for key, value in return_value.items():
if isinstance(value, Dataset):
self.outlets.append(value)
xcom_push(context, key, value)
else:
raise AirflowException(
f"Returned output was type {type(return_value)} expected dictionary for multiple_outputs"
)
return return_value
def _hook_apply_defaults(self, *args, **kwargs):
if "python_callable" not in kwargs:
return args, kwargs
python_callable = kwargs["python_callable"]
default_args = kwargs.get("default_args") or {}
op_kwargs = kwargs.get("op_kwargs") or {}
f_sig = inspect.signature(python_callable)
for arg in f_sig.parameters:
if arg not in op_kwargs and arg in default_args:
op_kwargs[arg] = default_args[arg]
kwargs["op_kwargs"] = op_kwargs
return args, kwargs
def get_python_source(self):
raw_source = inspect.getsource(self.python_callable)
res = dedent(raw_source)
res = remove_task_decorator(res, self.custom_operator_name)
return res
FParams = ParamSpec("FParams")
FReturn = TypeVar("FReturn")
OperatorSubclass = TypeVar("OperatorSubclass", bound="BaseOperator")
@attr.define(slots=False)
class _TaskDecorator(ExpandableFactory, Generic[FParams, FReturn, OperatorSubclass]):
"""
Helper class for providing dynamic task mapping to decorated functions.
``task_decorator_factory`` returns an instance of this, instead of just a plain wrapped function.
:meta private:
"""
function: Callable[FParams, FReturn] = attr.ib(validator=attr.validators.is_callable())
operator_class: type[OperatorSubclass]
multiple_outputs: bool = attr.ib()
kwargs: dict[str, Any] = attr.ib(factory=dict)
decorator_name: str = attr.ib(repr=False, default="task")
_airflow_is_task_decorator: ClassVar[bool] = True
is_setup: ClassVar[bool] = False
is_teardown: ClassVar[bool] = False
on_failure_fail_dagrun: ClassVar[bool] = False
@multiple_outputs.default
def _infer_multiple_outputs(self):
if "return" not in self.function.__annotations__:
# No return type annotation, nothing to infer
return False
try:
# We only care about the return annotation, not anything about the parameters
def fake():
...
fake.__annotations__ = {"return": self.function.__annotations__["return"]}
return_type = typing_extensions.get_type_hints(fake, self.function.__globals__).get("return", Any)
except NameError as e:
warnings.warn(
f"Cannot infer multiple_outputs for TaskFlow function {self.function.__name__!r} with forward"
f" type references that are not imported. (Error was {e})",
stacklevel=4,
)
return False
except TypeError: # Can't evaluate return type.
return False
ttype = getattr(return_type, "__origin__", return_type)
return ttype == dict or ttype == Dict
def __attrs_post_init__(self):
if "self" in self.function_signature.parameters:
raise TypeError(f"@{self.decorator_name} does not support methods")
self.kwargs.setdefault("task_id", self.function.__name__)
def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> XComArg:
if self.is_teardown:
if "trigger_rule" in self.kwargs:
raise ValueError("Trigger rule not configurable for teardown tasks.")
self.kwargs.update(trigger_rule=TriggerRule.ALL_DONE_SETUP_SUCCESS)
on_failure_fail_dagrun = self.kwargs.pop("on_failure_fail_dagrun", self.on_failure_fail_dagrun)
op = self.operator_class(
python_callable=self.function,
op_args=args,
op_kwargs=kwargs,
multiple_outputs=self.multiple_outputs,
**self.kwargs,
)
op.is_setup = self.is_setup
op.is_teardown = self.is_teardown
op.on_failure_fail_dagrun = on_failure_fail_dagrun
op_doc_attrs = [op.doc, op.doc_json, op.doc_md, op.doc_rst, op.doc_yaml]
# Set the task's doc_md to the function's docstring if it exists and no other doc* args are set.
if self.function.__doc__ and not any(op_doc_attrs):
op.doc_md = self.function.__doc__
return XComArg(op)
@property
def __wrapped__(self) -> Callable[FParams, FReturn]:
return self.function
def _validate_arg_names(self, func: ValidationSource, kwargs: dict[str, Any]):
# Ensure that context variables are not shadowed.
context_keys_being_mapped = KNOWN_CONTEXT_KEYS.intersection(kwargs)
if len(context_keys_being_mapped) == 1:
(name,) = context_keys_being_mapped
raise ValueError(f"cannot call {func}() on task context variable {name!r}")
elif context_keys_being_mapped:
names = ", ".join(repr(n) for n in context_keys_being_mapped)
raise ValueError(f"cannot call {func}() on task context variables {names}")
super()._validate_arg_names(func, kwargs)
def expand(self, **map_kwargs: OperatorExpandArgument) -> XComArg:
if not map_kwargs:
raise TypeError("no arguments to expand against")
self._validate_arg_names("expand", map_kwargs)
prevent_duplicates(self.kwargs, map_kwargs, fail_reason="mapping already partial")
# Since the input is already checked at parse time, we can set strict
# to False to skip the checks on execution.
return self._expand(DictOfListsExpandInput(map_kwargs), strict=False)
def expand_kwargs(self, kwargs: OperatorExpandKwargsArgument, *, strict: bool = True) -> XComArg:
if isinstance(kwargs, Sequence):
for item in kwargs:
if not isinstance(item, (XComArg, Mapping)):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
elif not isinstance(kwargs, XComArg):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
return self._expand(ListOfDictsExpandInput(kwargs), strict=strict)
def _expand(self, expand_input: ExpandInput, *, strict: bool) -> XComArg:
ensure_xcomarg_return_value(expand_input.value)
task_kwargs = self.kwargs.copy()
dag = task_kwargs.pop("dag", None) or DagContext.get_current_dag()
task_group = task_kwargs.pop("task_group", None) or TaskGroupContext.get_current_task_group(dag)
partial_kwargs, partial_params = get_merged_defaults(
dag=dag,
task_group=task_group,
task_params=task_kwargs.pop("params", None),
task_default_args=task_kwargs.pop("default_args", None),
)
partial_kwargs.update(task_kwargs)
task_id = get_unique_task_id(partial_kwargs.pop("task_id"), dag, task_group)
if task_group:
task_id = task_group.child_id(task_id)
# Logic here should be kept in sync with BaseOperatorMeta.partial().
if "task_concurrency" in partial_kwargs:
raise TypeError("unexpected argument: task_concurrency")
if partial_kwargs.get("wait_for_downstream"):
partial_kwargs["depends_on_past"] = True
start_date = timezone.convert_to_utc(partial_kwargs.pop("start_date", None))
end_date = timezone.convert_to_utc(partial_kwargs.pop("end_date", None))
if partial_kwargs.get("pool") is None:
partial_kwargs["pool"] = Pool.DEFAULT_POOL_NAME
partial_kwargs["retries"] = parse_retries(partial_kwargs.get("retries", DEFAULT_RETRIES))
partial_kwargs["retry_delay"] = coerce_timedelta(
partial_kwargs.get("retry_delay", DEFAULT_RETRY_DELAY),
key="retry_delay",
)
max_retry_delay = partial_kwargs.get("max_retry_delay")
partial_kwargs["max_retry_delay"] = (
max_retry_delay
if max_retry_delay is None
else coerce_timedelta(max_retry_delay, key="max_retry_delay")
)
partial_kwargs["resources"] = coerce_resources(partial_kwargs.get("resources"))
partial_kwargs.setdefault("executor_config", {})
partial_kwargs.setdefault("op_args", [])
partial_kwargs.setdefault("op_kwargs", {})
# Mypy does not work well with a subclassed attrs class :(
_MappedOperator = cast(Any, DecoratedMappedOperator)
try:
operator_name = self.operator_class.custom_operator_name # type: ignore
except AttributeError:
operator_name = self.operator_class.__name__
operator = _MappedOperator(
operator_class=self.operator_class,
expand_input=EXPAND_INPUT_EMPTY, # Don't use this; mapped values go to op_kwargs_expand_input.
partial_kwargs=partial_kwargs,
task_id=task_id,
params=partial_params,
deps=MappedOperator.deps_for(self.operator_class),
operator_extra_links=self.operator_class.operator_extra_links,
template_ext=self.operator_class.template_ext,
template_fields=self.operator_class.template_fields,
template_fields_renderers=self.operator_class.template_fields_renderers,
ui_color=self.operator_class.ui_color,
ui_fgcolor=self.operator_class.ui_fgcolor,
is_empty=False,
task_module=self.operator_class.__module__,
task_type=self.operator_class.__name__,
operator_name=operator_name,
dag=dag,
task_group=task_group,
start_date=start_date,
end_date=end_date,
multiple_outputs=self.multiple_outputs,
python_callable=self.function,
op_kwargs_expand_input=expand_input,
disallow_kwargs_override=strict,
# Different from classic operators, kwargs passed to a taskflow
# task's expand() contribute to the op_kwargs operator argument, not
# the operator arguments themselves, and should expand against it.
expand_input_attr="op_kwargs_expand_input",
)
return XComArg(operator=operator)
def partial(self, **kwargs: Any) -> _TaskDecorator[FParams, FReturn, OperatorSubclass]:
self._validate_arg_names("partial", kwargs)
old_kwargs = self.kwargs.get("op_kwargs", {})
prevent_duplicates(old_kwargs, kwargs, fail_reason="duplicate partial")
kwargs.update(old_kwargs)
return attr.evolve(self, kwargs={**self.kwargs, "op_kwargs": kwargs})
def override(self, **kwargs: Any) -> _TaskDecorator[FParams, FReturn, OperatorSubclass]:
result = attr.evolve(self, kwargs={**self.kwargs, **kwargs})
setattr(result, "is_setup", self.is_setup)
setattr(result, "is_teardown", self.is_teardown)
setattr(result, "on_failure_fail_dagrun", self.on_failure_fail_dagrun)
return result
@attr.define(kw_only=True, repr=False)
class DecoratedMappedOperator(MappedOperator):
"""MappedOperator implementation for @task-decorated task function."""
multiple_outputs: bool
python_callable: Callable
# We can't save these in expand_input because op_kwargs need to be present
# in partial_kwargs, and MappedOperator prevents duplication.
op_kwargs_expand_input: ExpandInput
def __hash__(self):
return id(self)
def __attrs_post_init__(self):
# The magic super() doesn't work here, so we use the explicit form.
# Not using super(..., self) to work around pyupgrade bug.
super(DecoratedMappedOperator, DecoratedMappedOperator).__attrs_post_init__(self)
XComArg.apply_upstream_relationship(self, self.op_kwargs_expand_input.value)
def _expand_mapped_kwargs(self, context: Context, session: Session) -> tuple[Mapping[str, Any], set[int]]:
# We only use op_kwargs_expand_input so this must always be empty.
assert self.expand_input is EXPAND_INPUT_EMPTY
op_kwargs, resolved_oids = super()._expand_mapped_kwargs(context, session)
return {"op_kwargs": op_kwargs}, resolved_oids
def _get_unmap_kwargs(self, mapped_kwargs: Mapping[str, Any], *, strict: bool) -> dict[str, Any]:
partial_op_kwargs = self.partial_kwargs["op_kwargs"]
mapped_op_kwargs = mapped_kwargs["op_kwargs"]
if strict:
prevent_duplicates(partial_op_kwargs, mapped_op_kwargs, fail_reason="mapping already partial")
kwargs = {
"multiple_outputs": self.multiple_outputs,
"python_callable": self.python_callable,
"op_kwargs": {**partial_op_kwargs, **mapped_op_kwargs},
}
return super()._get_unmap_kwargs(kwargs, strict=False)
class Task(Protocol, Generic[FParams, FReturn]):
"""Declaration of a @task-decorated callable for type-checking.
An instance of this type inherits the call signature of the decorated
function wrapped in it (not *exactly* since it actually returns an XComArg,
but there's no way to express that right now), and provides two additional
methods for task-mapping.
This type is implemented by ``_TaskDecorator`` at runtime.
"""
__call__: Callable[FParams, XComArg]
function: Callable[FParams, FReturn]
@property
def __wrapped__(self) -> Callable[FParams, FReturn]:
...
def partial(self, **kwargs: Any) -> Task[FParams, FReturn]:
...
def expand(self, **kwargs: OperatorExpandArgument) -> XComArg:
...
def expand_kwargs(self, kwargs: OperatorExpandKwargsArgument, *, strict: bool = True) -> XComArg:
...
def override(self, **kwargs: Any) -> Task[FParams, FReturn]:
...
class TaskDecorator(Protocol):
"""Type declaration for ``task_decorator_factory`` return type."""
@overload
def __call__( # type: ignore[misc]
self,
python_callable: Callable[FParams, FReturn],
) -> Task[FParams, FReturn]:
"""For the "bare decorator" ``@task`` case."""
@overload
def __call__(
self,
*,
multiple_outputs: bool | None = None,
**kwargs: Any,
) -> Callable[[Callable[FParams, FReturn]], Task[FParams, FReturn]]:
"""For the decorator factory ``@task()`` case."""
def override(self, **kwargs: Any) -> Task[FParams, FReturn]:
...
def task_decorator_factory(
python_callable: Callable | None = None,
*,
multiple_outputs: bool | None = None,
decorated_operator_class: type[BaseOperator],
**kwargs,
) -> TaskDecorator:
"""Generate a wrapper that wraps a function into an Airflow operator.
Can be reused in a single DAG.
:param python_callable: Function to decorate.
:param multiple_outputs: If set to True, the decorated function's return
value will be unrolled to multiple XCom values. Dict will unroll to XCom
        values with its keys as XCom keys. If set to False (default), at most
        one XCom value is pushed.
:param decorated_operator_class: The operator that executes the logic needed
to run the python function in the correct environment.
Other kwargs are directly forwarded to the underlying operator class when
it's instantiated.
"""
if multiple_outputs is None:
multiple_outputs = cast(bool, attr.NOTHING)
if python_callable:
decorator = _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast(TaskDecorator, decorator)
elif python_callable is not None:
raise TypeError("No args allowed while using @task, use kwargs instead")
def decorator_factory(python_callable):
return _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast(TaskDecorator, decorator_factory)
| 25,879 | 39.755906 | 110 |
py
|
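A hedged usage sketch of the machinery defined above: a @task-decorated function mapped with partial()/expand(), which is what _TaskDecorator ultimately turns into a DecoratedMappedOperator. The DAG id, dates and values are placeholders.
import pendulum

from airflow.decorators import dag, task

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def mapping_example():
    @task
    def add(x: int, y: int) -> int:
        return x + y

    # partial() pins y; expand() maps x, producing one task instance per element.
    add.partial(y=10).expand(x=[1, 2, 3])

mapping_example()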
airflow
|
airflow-main/airflow/decorators/task_group.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implements the ``@task_group`` function decorator.
When the decorated function is called, a task group will be created to represent
a collection of closely related tasks on the same DAG that should be grouped
together when the DAG is displayed graphically.
"""
from __future__ import annotations
import functools
import inspect
import warnings
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, Mapping, Sequence, TypeVar, overload
import attr
from airflow.decorators.base import ExpandableFactory
from airflow.models.expandinput import (
DictOfListsExpandInput,
ListOfDictsExpandInput,
MappedArgument,
OperatorExpandArgument,
OperatorExpandKwargsArgument,
)
from airflow.models.taskmixin import DAGNode
from airflow.models.xcom_arg import XComArg
from airflow.typing_compat import ParamSpec
from airflow.utils.helpers import prevent_duplicates
from airflow.utils.task_group import MappedTaskGroup, TaskGroup
if TYPE_CHECKING:
from airflow.models.dag import DAG
FParams = ParamSpec("FParams")
FReturn = TypeVar("FReturn", None, DAGNode)
task_group_sig = inspect.signature(TaskGroup.__init__)
@attr.define()
class _TaskGroupFactory(ExpandableFactory, Generic[FParams, FReturn]):
function: Callable[FParams, FReturn] = attr.ib(validator=attr.validators.is_callable())
tg_kwargs: dict[str, Any] = attr.ib(factory=dict) # Parameters forwarded to TaskGroup.
partial_kwargs: dict[str, Any] = attr.ib(factory=dict) # Parameters forwarded to 'function'.
_task_group_created: bool = attr.ib(False, init=False)
tg_class: ClassVar[type[TaskGroup]] = TaskGroup
@tg_kwargs.validator
def _validate(self, _, kwargs):
task_group_sig.bind_partial(**kwargs)
def __attrs_post_init__(self):
self.tg_kwargs.setdefault("group_id", self.function.__name__)
def __del__(self):
if self.partial_kwargs and not self._task_group_created:
try:
group_id = repr(self.tg_kwargs["group_id"])
except KeyError:
group_id = f"at {hex(id(self))}"
warnings.warn(f"Partial task group {group_id} was never mapped!")
def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> DAGNode:
"""Instantiate the task group.
This uses the wrapped function to create a task group. Depending on the
return type of the wrapped function, this either returns the last task
in the group, or the group itself, to support task chaining.
"""
return self._create_task_group(TaskGroup, *args, **kwargs)
def _create_task_group(self, tg_factory: Callable[..., TaskGroup], *args: Any, **kwargs: Any) -> DAGNode:
with tg_factory(add_suffix_on_collision=True, **self.tg_kwargs) as task_group:
if self.function.__doc__ and not task_group.tooltip:
task_group.tooltip = self.function.__doc__
# Invoke function to run Tasks inside the TaskGroup
retval = self.function(*args, **kwargs)
self._task_group_created = True
# If the task-creating function returns a task, forward the return value
# so dependencies bind to it. This is equivalent to
# with TaskGroup(...) as tg:
# t2 = task_2(task_1())
# start >> t2 >> end
if retval is not None:
return retval
# Otherwise return the task group as a whole, equivalent to
# with TaskGroup(...) as tg:
# task_1()
# task_2()
# start >> tg >> end
return task_group
def override(self, **kwargs: Any) -> _TaskGroupFactory[FParams, FReturn]:
# TODO: fixme when mypy gets compatible with new attrs
return attr.evolve(self, tg_kwargs={**self.tg_kwargs, **kwargs}) # type: ignore[arg-type]
def partial(self, **kwargs: Any) -> _TaskGroupFactory[FParams, FReturn]:
self._validate_arg_names("partial", kwargs)
prevent_duplicates(self.partial_kwargs, kwargs, fail_reason="duplicate partial")
kwargs.update(self.partial_kwargs)
# TODO: fixme when mypy gets compatible with new attrs
return attr.evolve(self, partial_kwargs=kwargs) # type: ignore[arg-type]
def expand(self, **kwargs: OperatorExpandArgument) -> DAGNode:
if not kwargs:
raise TypeError("no arguments to expand against")
self._validate_arg_names("expand", kwargs)
prevent_duplicates(self.partial_kwargs, kwargs, fail_reason="mapping already partial")
expand_input = DictOfListsExpandInput(kwargs)
return self._create_task_group(
functools.partial(MappedTaskGroup, expand_input=expand_input),
**self.partial_kwargs,
**{k: MappedArgument(input=expand_input, key=k) for k in kwargs},
)
def expand_kwargs(self, kwargs: OperatorExpandKwargsArgument) -> DAGNode:
if isinstance(kwargs, Sequence):
for item in kwargs:
if not isinstance(item, (XComArg, Mapping)):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
elif not isinstance(kwargs, XComArg):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
# It's impossible to build a dict of stubs as keyword arguments if the
# function uses * or ** wildcard arguments.
function_has_vararg = any(
v.kind == inspect.Parameter.VAR_POSITIONAL or v.kind == inspect.Parameter.VAR_KEYWORD
for v in self.function_signature.parameters.values()
)
if function_has_vararg:
raise TypeError("calling expand_kwargs() on task group function with * or ** is not supported")
# We can't be sure how each argument is used in the function (well
# technically we can with AST but let's not), so we have to create stubs
# for every argument, including those with default values.
map_kwargs = (k for k in self.function_signature.parameters if k not in self.partial_kwargs)
expand_input = ListOfDictsExpandInput(kwargs)
return self._create_task_group(
functools.partial(MappedTaskGroup, expand_input=expand_input),
**self.partial_kwargs,
**{k: MappedArgument(input=expand_input, key=k) for k in map_kwargs},
)
# This covers the @task_group() case. Annotations are copied from the TaskGroup
# class, only providing a default to 'group_id' (this is optional for the
# decorator and defaults to the decorated function's name). Please keep them in
# sync with TaskGroup when you can! Note that since this is an overload, these
# argument defaults aren't actually used at runtime--the real implementation
# does not use them, and simply rely on TaskGroup's defaults, so it's not
# disastrous if they go out of sync with TaskGroup.
@overload
def task_group(
group_id: str | None = None,
prefix_group_id: bool = True,
parent_group: TaskGroup | None = None,
dag: DAG | None = None,
default_args: dict[str, Any] | None = None,
tooltip: str = "",
ui_color: str = "CornflowerBlue",
ui_fgcolor: str = "#000",
add_suffix_on_collision: bool = False,
) -> Callable[[Callable[FParams, FReturn]], _TaskGroupFactory[FParams, FReturn]]:
...
# This covers the @task_group case (no parentheses).
@overload
def task_group(python_callable: Callable[FParams, FReturn]) -> _TaskGroupFactory[FParams, FReturn]:
...
def task_group(python_callable=None, **tg_kwargs):
"""Python TaskGroup decorator.
    This wraps a function into an Airflow TaskGroup. When used in the
``@task_group()`` form, all arguments are forwarded to the underlying
TaskGroup class. Can be used to parametrize TaskGroup.
:param python_callable: Function to decorate.
:param tg_kwargs: Keyword arguments for the TaskGroup object.
"""
if callable(python_callable) and not tg_kwargs:
return _TaskGroupFactory(function=python_callable, tg_kwargs=tg_kwargs)
return functools.partial(_TaskGroupFactory, tg_kwargs=tg_kwargs)
| 8,919 | 41.47619 | 109 |
py
|
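A sketch of the decorator implemented above, including the mapped form handled by _TaskGroupFactory.expand(); the DAG id, dates and source names are placeholders.
import pendulum

from airflow.decorators import dag, task, task_group

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def group_example():
    @task
    def extract(src: str) -> str:
        return f"data from {src}"

    @task
    def load(payload: str) -> None:
        print(payload)

    @task_group
    def etl(src: str):
        load(extract(src))

    # One mapped group instance per source.
    etl.expand(src=["s3://bucket/a", "s3://bucket/b"])

group_example()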
airflow
|
airflow-main/airflow/decorators/branch_python.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import BranchPythonOperator
class _BranchPythonDecoratedOperator(DecoratedOperator, BranchPythonOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
custom_operator_name: str = "@task.branch"
def __init__(
self,
**kwargs,
) -> None:
kwargs_to_upstream = {
"python_callable": kwargs["python_callable"],
"op_args": kwargs["op_args"],
"op_kwargs": kwargs["op_kwargs"],
}
super().__init__(kwargs_to_upstream=kwargs_to_upstream, **kwargs)
def branch_task(
python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs
) -> TaskDecorator:
"""
Wraps a python function into a BranchPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_BranchPythonDecoratedOperator,
**kwargs,
)
| 2,884 | 37.466667 | 97 |
py
|
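A minimal sketch of the @task.branch form wired to _BranchPythonDecoratedOperator above: the callable returns the task_id to follow, and the other branches are skipped. Ids and dates are placeholders.
import pendulum

from airflow.decorators import dag, task
from airflow.operators.empty import EmptyOperator

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def branch_example():
    @task.branch
    def pick(day: str) -> str:
        return "weekend_path" if day in ("Sat", "Sun") else "weekday_path"

    pick("Sat") >> [EmptyOperator(task_id="weekend_path"), EmptyOperator(task_id="weekday_path")]

branch_example()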
airflow
|
airflow-main/airflow/decorators/external_python.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import ExternalPythonOperator
class _PythonExternalDecoratedOperator(DecoratedOperator, ExternalPythonOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtualenv that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
custom_operator_name: str = "@task.external_python"
def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
kwargs_to_upstream = {
"python_callable": python_callable,
"op_args": op_args,
"op_kwargs": op_kwargs,
}
super().__init__(
kwargs_to_upstream=kwargs_to_upstream,
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
**kwargs,
)
def external_python_task(
python: str | None = None,
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""Wraps a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:meta private:
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtualenv that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python=python,
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonExternalDecoratedOperator,
**kwargs,
)
| 3,702 | 41.563218 | 108 |
py
|
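A sketch of @task.external_python; the interpreter path is an assumption about a virtualenv pre-built on the workers, and the DAG details are placeholders.
import pendulum

from airflow.decorators import dag, task

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def external_python_example():
    @task.external_python(python="/opt/venvs/reporting/bin/python")  # assumed venv path
    def crunch(n: int) -> int:
        # Runs inside the external interpreter, so only its installed packages are available.
        return n * 2

    crunch(21)

external_python_example()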
airflow
|
airflow-main/airflow/decorators/python.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable, Sequence
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import PythonOperator
class _PythonDecoratedOperator(DecoratedOperator, PythonOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
template_fields: Sequence[str] = ("templates_dict", "op_args", "op_kwargs")
template_fields_renderers = {"templates_dict": "json", "op_args": "py", "op_kwargs": "py"}
custom_operator_name: str = "@task"
def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
kwargs_to_upstream = {
"python_callable": python_callable,
"op_args": op_args,
"op_kwargs": op_kwargs,
}
super().__init__(
kwargs_to_upstream=kwargs_to_upstream,
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
**kwargs,
)
def python_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""Wraps a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonDecoratedOperator,
**kwargs,
)
| 3,096 | 39.220779 | 108 |
py
|
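A sketch of the plain @task decorator defined above, including the multiple_outputs behaviour its docstring describes; ids, dates and values are placeholders.
import pendulum

from airflow.decorators import dag, task

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def python_task_example():
    @task(multiple_outputs=True)
    def stats(values: list) -> dict:
        return {"total": sum(values), "count": len(values)}

    @task
    def report(total: int, count: int) -> None:
        print(f"{count} values, sum {total}")

    out = stats([1, 2, 3])
    report(total=out["total"], count=out["count"])  # each dict key is pushed as its own XCom

python_task_example()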
airflow
|
airflow-main/airflow/decorators/short_circuit.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import ShortCircuitOperator
class _ShortCircuitDecoratedOperator(DecoratedOperator, ShortCircuitOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
custom_operator_name: str = "@task.short_circuit"
def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
kwargs_to_upstream = {
"python_callable": python_callable,
"op_args": op_args,
"op_kwargs": op_kwargs,
}
super().__init__(
kwargs_to_upstream=kwargs_to_upstream,
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
**kwargs,
)
def short_circuit_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""Wraps a function into an ShortCircuitOperator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
    This function is only used during type checking or auto-completion.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
:meta private:
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_ShortCircuitDecoratedOperator,
**kwargs,
)
| 3,062 | 38.269231 | 108 |
py
|
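A sketch of @task.short_circuit: a falsy return value skips the downstream tasks. DAG details are placeholders.
import pendulum

from airflow.decorators import dag, task

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def short_circuit_example():
    @task.short_circuit
    def data_available() -> bool:
        return False  # falsy -> everything downstream is skipped

    @task
    def process() -> None:
        print("processing")

    data_available() >> process()

short_circuit_example()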
airflow
|
airflow-main/airflow/decorators/sensor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable, Sequence
from airflow.decorators.base import TaskDecorator, get_unique_task_id, task_decorator_factory
from airflow.sensors.python import PythonSensor
class DecoratedSensorOperator(PythonSensor):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param task_id: task Id
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param kwargs_to_upstream: For certain operators, we might need to upstream certain arguments
that would otherwise be absorbed by the DecoratedOperator (for example python_callable for the
PythonOperator). This gives a user the option to upstream kwargs as needed.
"""
template_fields: Sequence[str] = ("op_args", "op_kwargs")
template_fields_renderers: dict[str, str] = {"op_args": "py", "op_kwargs": "py"}
custom_operator_name = "@task.sensor"
# since we won't mutate the arguments, we should just do the shallow copy
    # there are some cases we can't deepcopy the objects (e.g. protobuf).
shallow_copy_attrs: Sequence[str] = ("python_callable",)
def __init__(
self,
*,
task_id: str,
**kwargs,
) -> None:
kwargs.pop("multiple_outputs")
kwargs["task_id"] = get_unique_task_id(task_id, kwargs.get("dag"), kwargs.get("task_group"))
super().__init__(**kwargs)
def sensor_task(python_callable: Callable | None = None, **kwargs) -> TaskDecorator:
"""
Wraps a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=False,
decorated_operator_class=DecoratedSensorOperator,
**kwargs,
)
| 2,905 | 38.27027 | 102 |
py
|
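A sketch of @task.sensor, which the DecoratedSensorOperator above backs; the poke interval, timeout and file path are illustrative assumptions.
import os

import pendulum

from airflow.decorators import dag, task
from airflow.sensors.base import PokeReturnValue

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def sensor_example():
    @task.sensor(poke_interval=30, timeout=600, mode="reschedule")
    def wait_for_flag() -> PokeReturnValue:
        done = os.path.exists("/tmp/ready.flag")  # placeholder condition
        return PokeReturnValue(is_done=done, xcom_value="/tmp/ready.flag" if done else None)

    wait_for_flag()

sensor_example()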
airflow
|
airflow-main/airflow/decorators/setup_teardown.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import types
from typing import Callable
from airflow import AirflowException, XComArg
from airflow.decorators import python_task
from airflow.decorators.task_group import _TaskGroupFactory
from airflow.models import BaseOperator
from airflow.utils.setup_teardown import SetupTeardownContext
def setup_task(func: Callable) -> Callable:
# Using FunctionType here since _TaskDecorator is also a callable
if isinstance(func, types.FunctionType):
func = python_task(func)
if isinstance(func, _TaskGroupFactory):
raise AirflowException("Task groups cannot be marked as setup or teardown.")
func.is_setup = True # type: ignore[attr-defined]
return func
def teardown_task(_func=None, *, on_failure_fail_dagrun: bool = False) -> Callable:
def teardown(func: Callable) -> Callable:
# Using FunctionType here since _TaskDecorator is also a callable
if isinstance(func, types.FunctionType):
func = python_task(func)
if isinstance(func, _TaskGroupFactory):
raise AirflowException("Task groups cannot be marked as setup or teardown.")
func.is_teardown = True # type: ignore[attr-defined]
func.on_failure_fail_dagrun = on_failure_fail_dagrun # type: ignore[attr-defined]
return func
if _func is None:
return teardown
return teardown(_func)
class ContextWrapper(list):
"""A list subclass that has a context manager that pushes setup/teardown tasks to the context."""
def __init__(self, tasks: list[BaseOperator | XComArg]):
self.tasks = tasks
super().__init__(tasks)
def __enter__(self):
operators = []
for task in self.tasks:
if isinstance(task, BaseOperator):
operators.append(task)
if not task.is_setup and not task.is_teardown:
raise AirflowException("Only setup/teardown tasks can be used as context managers.")
elif not task.operator.is_setup and not task.operator.is_teardown:
raise AirflowException("Only setup/teardown tasks can be used as context managers.")
if not operators:
# means we have XComArgs
operators = [task.operator for task in self.tasks]
SetupTeardownContext.push_setup_teardown_task(operators)
return SetupTeardownContext
def __exit__(self, exc_type, exc_val, exc_tb):
SetupTeardownContext.set_work_task_roots_and_leaves()
context_wrapper = ContextWrapper
| 3,328 | 39.597561 | 104 |
py
|
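A sketch of the setup/teardown decorators above; the resource names are placeholders, and on_failure_fail_dagrun mirrors the flag handled in teardown_task.
import pendulum

from airflow.decorators import dag, setup, task, teardown

@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def setup_teardown_example():
    @setup
    def create_cluster() -> str:
        return "cluster-123"

    @task
    def run_job(cluster_id: str) -> None:
        print(f"running on {cluster_id}")

    @teardown(on_failure_fail_dagrun=True)
    def delete_cluster(cluster_id: str) -> None:
        print(f"deleting {cluster_id}")

    cluster = create_cluster()
    run_job(cluster) >> delete_cluster(cluster)

setup_teardown_example()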
airflow
|
airflow-main/airflow/decorators/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Callable
from airflow.decorators.base import TaskDecorator
from airflow.decorators.branch_python import branch_task
from airflow.decorators.external_python import external_python_task
from airflow.decorators.python import python_task
from airflow.decorators.python_virtualenv import virtualenv_task
from airflow.decorators.sensor import sensor_task
from airflow.decorators.setup_teardown import setup_task, teardown_task
from airflow.decorators.short_circuit import short_circuit_task
from airflow.decorators.task_group import task_group
from airflow.models.dag import dag
from airflow.providers_manager import ProvidersManager
# Please keep this in sync with the .pyi's __all__.
__all__ = [
"TaskDecorator",
"TaskDecoratorCollection",
"dag",
"task",
"task_group",
"python_task",
"virtualenv_task",
"external_python_task",
"branch_task",
"short_circuit_task",
"sensor_task",
"setup",
"teardown",
]
class TaskDecoratorCollection:
"""Implementation to provide the ``@task`` syntax."""
python = staticmethod(python_task)
virtualenv = staticmethod(virtualenv_task)
external_python = staticmethod(external_python_task)
branch = staticmethod(branch_task)
short_circuit = staticmethod(short_circuit_task)
sensor = staticmethod(sensor_task)
__call__: Any = python # Alias '@task' to '@task.python'.
def __getattr__(self, name: str) -> TaskDecorator:
"""Dynamically get provider-registered task decorators, e.g. ``@task.docker``."""
if name.startswith("__"):
raise AttributeError(f"{type(self).__name__} has no attribute {name!r}")
decorators = ProvidersManager().taskflow_decorators
if name not in decorators:
raise AttributeError(f"task decorator {name!r} not found")
return decorators[name]
task = TaskDecoratorCollection()
setup: Callable = setup_task
teardown: Callable = teardown_task
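# A short sketch of the ``@task`` syntax provided by the collection above, assuming a
# separate DAG file; provider decorators such as ``@task.docker`` resolve through
# ``__getattr__`` only when the corresponding provider package is installed:
#
#     from airflow.decorators import dag, task
#
#     @dag(schedule=None, catchup=False)
#     def taskflow_demo():
#         @task
#         def extract():
#             return {"a": 1, "b": 2}
#
#         @task.python(multiple_outputs=True)
#         def transform(data):
#             return {"total": sum(data.values())}
#
#         transform(extract())
#
#     taskflow_demo()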
| 2,789 | 35.710526 | 89 |
py
|
airflow
|
airflow-main/airflow/decorators/python_virtualenv.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import PythonVirtualenvOperator
class _PythonVirtualenvDecoratedOperator(DecoratedOperator, PythonVirtualenvOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
custom_operator_name: str = "@task.virtualenv"
def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
kwargs_to_upstream = {
"python_callable": python_callable,
"op_args": op_args,
"op_kwargs": op_kwargs,
}
super().__init__(
kwargs_to_upstream=kwargs_to_upstream,
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
**kwargs,
)
def virtualenv_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""Wraps a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
    This function is only used during type checking or auto-completion.
:meta private:
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonVirtualenvDecoratedOperator,
**kwargs,
)
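# A hedged usage sketch for ``@task.virtualenv``; the "colorama" requirement is only
# an example package, and all imports must live inside the decorated function:
#
#     from airflow.decorators import task
#
#     @task.virtualenv(requirements=["colorama==0.4.6"], system_site_packages=False)
#     def print_in_color():
#         from colorama import Fore
#         print(Fore.GREEN + "running inside a freshly built virtualenv")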
| 3,116 | 38.455696 | 108 |
py
|
airflow
|
airflow-main/airflow/operators/datetime.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import warnings
from typing import Iterable
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.operators.branch import BaseBranchOperator
from airflow.utils import timezone
from airflow.utils.context import Context
class BranchDateTimeOperator(BaseBranchOperator):
"""Branches into one of two lists of tasks depending on the current datetime.
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BranchDateTimeOperator`.
True branch will be returned when ``datetime.datetime.now()`` falls below
``target_upper`` and above ``target_lower``.
:param follow_task_ids_if_true: task id or task ids to follow if
``datetime.datetime.now()`` falls above target_lower and below ``target_upper``.
:param follow_task_ids_if_false: task id or task ids to follow if
``datetime.datetime.now()`` falls below target_lower or above ``target_upper``.
:param target_lower: target lower bound.
:param target_upper: target upper bound.
:param use_task_logical_date: If ``True``, uses task's logical date to compare with targets.
Execution date is useful for backfilling. If ``False``, uses system's date.
"""
def __init__(
self,
*,
follow_task_ids_if_true: str | Iterable[str],
follow_task_ids_if_false: str | Iterable[str],
target_lower: datetime.datetime | datetime.time | None,
target_upper: datetime.datetime | datetime.time | None,
use_task_logical_date: bool = False,
use_task_execution_date: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
if target_lower is None and target_upper is None:
raise AirflowException(
"Both target_upper and target_lower are None. At least one "
"must be defined to be compared to the current datetime"
)
self.target_lower = target_lower
self.target_upper = target_upper
self.follow_task_ids_if_true = follow_task_ids_if_true
self.follow_task_ids_if_false = follow_task_ids_if_false
self.use_task_logical_date = use_task_logical_date
if use_task_execution_date:
self.use_task_logical_date = use_task_execution_date
warnings.warn(
"Parameter ``use_task_execution_date`` is deprecated. Use ``use_task_logical_date``.",
RemovedInAirflow3Warning,
stacklevel=2,
)
def choose_branch(self, context: Context) -> str | Iterable[str]:
if self.use_task_logical_date:
now = context["logical_date"]
else:
now = timezone.coerce_datetime(timezone.utcnow())
lower, upper = target_times_as_dates(now, self.target_lower, self.target_upper)
lower = timezone.coerce_datetime(lower, self.dag.timezone)
upper = timezone.coerce_datetime(upper, self.dag.timezone)
if upper is not None and upper < now:
return self.follow_task_ids_if_false
if lower is not None and lower > now:
return self.follow_task_ids_if_false
return self.follow_task_ids_if_true
def target_times_as_dates(
base_date: datetime.datetime,
lower: datetime.datetime | datetime.time | None,
upper: datetime.datetime | datetime.time | None,
):
"""Ensures upper and lower time targets are datetimes by combining them with base_date."""
if isinstance(lower, datetime.datetime) and isinstance(upper, datetime.datetime):
return lower, upper
if lower is not None and isinstance(lower, datetime.time):
lower = datetime.datetime.combine(base_date, lower)
if upper is not None and isinstance(upper, datetime.time):
upper = datetime.datetime.combine(base_date, upper)
if lower is None or upper is None:
return lower, upper
if upper < lower:
upper += datetime.timedelta(days=1)
return lower, upper
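# A minimal usage sketch for BranchDateTimeOperator, assuming tasks with the ids
# "in_window" and "out_of_window" exist in the same DAG:
#
#     import datetime
#     from airflow.operators.datetime import BranchDateTimeOperator
#
#     branch = BranchDateTimeOperator(
#         task_id="branch_on_time_of_day",
#         target_lower=datetime.time(9, 0),
#         target_upper=datetime.time(17, 0),
#         follow_task_ids_if_true="in_window",
#         follow_task_ids_if_false="out_of_window",
#     )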
| 4,838 | 40.358974 | 102 |
py
|
airflow
|
airflow-main/airflow/operators/latest_only.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contains an operator to run downstream tasks only for the latest scheduled DagRun."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
import pendulum
from airflow.operators.branch import BaseBranchOperator
from airflow.utils.context import Context
if TYPE_CHECKING:
from airflow.models import DAG, DagRun
class LatestOnlyOperator(BaseBranchOperator):
"""
Skip tasks that are not running during the most recent schedule interval.
If the task is run outside the latest schedule interval (i.e. external_trigger),
all directly downstream tasks will be skipped.
Note that downstream tasks are never skipped if the given DAG_Run is
marked as externally triggered.
"""
ui_color = "#e9ffdb" # nyanza
def choose_branch(self, context: Context) -> str | Iterable[str]:
# If the DAG Run is externally triggered, then return without
# skipping downstream tasks
dag_run: DagRun = context["dag_run"]
if dag_run.external_trigger:
self.log.info("Externally triggered DAG_Run: allowing execution to proceed.")
return list(context["task"].get_direct_relative_ids(upstream=False))
dag: DAG = context["dag"]
next_info = dag.next_dagrun_info(dag.get_run_data_interval(dag_run), restricted=False)
now = pendulum.now("UTC")
if next_info is None:
self.log.info("Last scheduled execution: allowing execution to proceed.")
return list(context["task"].get_direct_relative_ids(upstream=False))
left_window, right_window = next_info.data_interval
self.log.info(
"Checking latest only with left_window: %s right_window: %s now: %s",
left_window,
right_window,
now,
)
if not left_window < now <= right_window:
self.log.info("Not latest execution, skipping downstream.")
# we return an empty list, thus the parent BaseBranchOperator
# won't exclude any downstream tasks from skipping.
return []
else:
self.log.info("Latest, allowing execution to proceed.")
return list(context["task"].get_direct_relative_ids(upstream=False))
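# A minimal usage sketch: gate a downstream task so it only runs for the latest
# scheduled interval ("notify" is a placeholder for any task in the same DAG):
#
#     from airflow.operators.latest_only import LatestOnlyOperator
#
#     latest_only = LatestOnlyOperator(task_id="latest_only")
#     latest_only >> notify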
| 3,040 | 38.493506 | 94 |
py
|
airflow
|
airflow-main/airflow/operators/python.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import inspect
import logging
import os
import pickle
import shutil
import subprocess
import sys
import types
import warnings
from abc import ABCMeta, abstractmethod
from collections.abc import Container
from pathlib import Path
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Collection, Iterable, Mapping, Sequence, cast
import dill
from airflow.exceptions import (
AirflowConfigException,
AirflowException,
AirflowSkipException,
DeserializingResultError,
RemovedInAirflow3Warning,
)
from airflow.models.baseoperator import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskinstance import _CURRENT_CONTEXT
from airflow.utils.context import Context, context_copy_partial, context_merge
from airflow.utils.operator_helpers import KeywordParameters
from airflow.utils.process_utils import execute_in_subprocess
from airflow.utils.python_virtualenv import prepare_virtualenv, write_python_script
if TYPE_CHECKING:
from pendulum.datetime import DateTime
def task(python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs):
"""Deprecated. Use :func:`airflow.decorators.task` instead.
Calls ``@task.python`` and allows users to turn a Python function into
an Airflow task.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
Defaults to False.
"""
# To maintain backwards compatibility, we import the task object into this file
# This prevents breakages in dags that use `from airflow.operators.python import task`
from airflow.decorators.python import python_task
warnings.warn(
"""airflow.operators.python.task is deprecated. Please use the following instead
from airflow.decorators import task
@task
def my_task()""",
RemovedInAirflow3Warning,
stacklevel=2,
)
return python_task(python_callable=python_callable, multiple_outputs=multiple_outputs, **kwargs)
class PythonOperator(BaseOperator):
"""
Executes a Python callable.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonOperator`
When running your callable, Airflow will pass a set of keyword arguments that can be used in your
function. This set of kwargs correspond exactly to what you can use in your jinja templates.
For this to work, you need to define ``**kwargs`` in your function header, or you can add directly the
keyword arguments you would like to get - for example with the below code your callable will get
the values of ``ti`` and ``next_ds`` context variables.
With explicit arguments:
.. code-block:: python
def my_python_callable(ti, next_ds):
pass
With kwargs:
.. code-block:: python
def my_python_callable(**kwargs):
ti = kwargs["ti"]
next_ds = kwargs["next_ds"]
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied. (templated)
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:param show_return_value_in_logs: a bool value whether to show return_value
logs. Defaults to True, which allows return value log output.
It can be set to False to prevent log output of return value when you return huge data
        such as transmitting a large amount of XCom to TaskAPI.
"""
template_fields: Sequence[str] = ("templates_dict", "op_args", "op_kwargs")
template_fields_renderers = {"templates_dict": "json", "op_args": "py", "op_kwargs": "py"}
BLUE = "#ffefeb"
ui_color = BLUE
# since we won't mutate the arguments, we should just do the shallow copy
    # there are some cases we can't deepcopy the objects (e.g. protobuf).
shallow_copy_attrs: Sequence[str] = (
"python_callable",
"op_kwargs",
)
def __init__(
self,
*,
python_callable: Callable,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
templates_dict: dict[str, Any] | None = None,
templates_exts: Sequence[str] | None = None,
show_return_value_in_logs: bool = True,
**kwargs,
) -> None:
if kwargs.get("provide_context"):
warnings.warn(
"provide_context is deprecated as of 2.0 and is no longer required",
RemovedInAirflow3Warning,
stacklevel=2,
)
kwargs.pop("provide_context", None)
super().__init__(**kwargs)
if not callable(python_callable):
raise AirflowException("`python_callable` param must be callable")
self.python_callable = python_callable
self.op_args = op_args or ()
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
if templates_exts:
self.template_ext = templates_exts
self.show_return_value_in_logs = show_return_value_in_logs
def execute(self, context: Context) -> Any:
context_merge(context, self.op_kwargs, templates_dict=self.templates_dict)
self.op_kwargs = self.determine_kwargs(context)
return_value = self.execute_callable()
if self.show_return_value_in_logs:
self.log.info("Done. Returned value was: %s", return_value)
else:
self.log.info("Done. Returned value not shown")
return return_value
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
return KeywordParameters.determine(self.python_callable, self.op_args, context).unpacking()
def execute_callable(self) -> Any:
"""
Calls the python callable with the given arguments.
:return: the return value of the call.
"""
return self.python_callable(*self.op_args, **self.op_kwargs)
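# A minimal usage sketch for PythonOperator, assuming it is declared inside a DAG
# context; "print_context" is a placeholder callable:
#
#     def print_context(ds, **kwargs):
#         print(f"Logical date (ds) is {ds}")
#
#     run_this = PythonOperator(
#         task_id="print_the_context",
#         python_callable=print_context,
#     )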
class BranchPythonOperator(PythonOperator, SkipMixin):
"""
A workflow can "branch" or follow a path after the execution of this task.
It derives the PythonOperator and expects a Python function that returns
a single task_id or list of task_ids to follow. The task_id(s) returned
should point to a task directly downstream from {self}. All other "branches"
or directly downstream tasks are marked with a state of ``skipped`` so that
these paths can't move forward. The ``skipped`` states are propagated
downstream to allow for the DAG state to fill up and the DAG run's state
to be inferred.
"""
def execute(self, context: Context) -> Any:
branch = super().execute(context)
self.log.info("Branch callable return %s", branch)
self.skip_all_except(context["ti"], branch)
return branch
class ShortCircuitOperator(PythonOperator, SkipMixin):
"""
Allows a pipeline to continue based on the result of a ``python_callable``.
The ShortCircuitOperator is derived from the PythonOperator and evaluates the result of a
``python_callable``. If the returned result is False or a falsy value, the pipeline will be
short-circuited. Downstream tasks will be marked with a state of "skipped" based on the short-circuiting
mode configured. If the returned result is True or a truthy value, downstream tasks proceed as normal and
an ``XCom`` of the returned result is pushed.
The short-circuiting can be configured to either respect or ignore the ``trigger_rule`` set for
downstream tasks. If ``ignore_downstream_trigger_rules`` is set to True, the default setting, all
downstream tasks are skipped without considering the ``trigger_rule`` defined for tasks. However, if this
parameter is set to False, the direct downstream tasks are skipped but the specified ``trigger_rule`` for
other subsequent downstream tasks are respected. In this mode, the operator assumes the direct downstream
tasks were purposely meant to be skipped but perhaps not other subsequent tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ShortCircuitOperator`
:param ignore_downstream_trigger_rules: If set to True, all downstream tasks from this operator task will
be skipped. This is the default behavior. If set to False, the direct, downstream task(s) will be
skipped but the ``trigger_rule`` defined for all other downstream tasks will be respected.
"""
def __init__(self, *, ignore_downstream_trigger_rules: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.ignore_downstream_trigger_rules = ignore_downstream_trigger_rules
def execute(self, context: Context) -> Any:
condition = super().execute(context)
self.log.info("Condition result is %s", condition)
if condition:
self.log.info("Proceeding with downstream tasks...")
return condition
if not self.downstream_task_ids:
self.log.info("No downstream tasks; nothing to do.")
return condition
dag_run = context["dag_run"]
def get_tasks_to_skip():
if self.ignore_downstream_trigger_rules is True:
tasks = context["task"].get_flat_relatives(upstream=False)
else:
tasks = context["task"].get_direct_relatives(upstream=False)
for t in tasks:
if not t.is_teardown:
yield t
to_skip = get_tasks_to_skip()
        # this lets us avoid an intermediate list unless debug logging
if self.log.getEffectiveLevel() <= logging.DEBUG:
self.log.debug("Downstream task IDs %s", to_skip := list(get_tasks_to_skip()))
self.log.info("Skipping downstream tasks")
self.skip(
dag_run=dag_run,
execution_date=cast("DateTime", dag_run.execution_date),
tasks=to_skip,
map_index=context["ti"].map_index,
)
self.log.info("Done.")
# returns the result of the super execute method as it is instead of returning None
return condition
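# A hedged sketch of the short-circuiting behaviour described above: a falsy return
# value skips downstream tasks (except teardowns); the callable is a placeholder:
#
#     def is_weekday(**kwargs):
#         return kwargs["logical_date"].weekday() < 5
#
#     gate = ShortCircuitOperator(
#         task_id="weekday_only",
#         python_callable=is_weekday,
#         ignore_downstream_trigger_rules=False,
#     )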
class _BasePythonVirtualenvOperator(PythonOperator, metaclass=ABCMeta):
BASE_SERIALIZABLE_CONTEXT_KEYS = {
"ds",
"ds_nodash",
"expanded_ti_count",
"inlets",
"next_ds",
"next_ds_nodash",
"outlets",
"prev_ds",
"prev_ds_nodash",
"run_id",
"task_instance_key_str",
"test_mode",
"tomorrow_ds",
"tomorrow_ds_nodash",
"ts",
"ts_nodash",
"ts_nodash_with_tz",
"yesterday_ds",
"yesterday_ds_nodash",
}
PENDULUM_SERIALIZABLE_CONTEXT_KEYS = {
"data_interval_end",
"data_interval_start",
"execution_date",
"logical_date",
"next_execution_date",
"prev_data_interval_end_success",
"prev_data_interval_start_success",
"prev_execution_date",
"prev_execution_date_success",
"prev_start_date_success",
}
AIRFLOW_SERIALIZABLE_CONTEXT_KEYS = {
"macros",
"conf",
"dag",
"dag_run",
"task",
"params",
"triggering_dataset_events",
}
def __init__(
self,
*,
python_callable: Callable,
use_dill: bool = False,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
skip_on_exit_code: int | Container[int] | None = None,
**kwargs,
):
if (
not isinstance(python_callable, types.FunctionType)
or isinstance(python_callable, types.LambdaType)
and python_callable.__name__ == "<lambda>"
):
raise AirflowException("PythonVirtualenvOperator only supports functions for python_callable arg")
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
templates_dict=templates_dict,
templates_exts=templates_exts,
**kwargs,
)
self.string_args = string_args or []
self.use_dill = use_dill
self.pickling_library = dill if self.use_dill else pickle
self.expect_airflow = expect_airflow
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code
else []
)
@abstractmethod
def _iter_serializable_context_keys(self):
pass
def execute(self, context: Context) -> Any:
serializable_keys = set(self._iter_serializable_context_keys())
serializable_context = context_copy_partial(context, serializable_keys)
return super().execute(context=serializable_context)
def get_python_source(self):
"""Return the source of self.python_callable."""
return dedent(inspect.getsource(self.python_callable))
def _write_args(self, file: Path):
if self.op_args or self.op_kwargs:
file.write_bytes(self.pickling_library.dumps({"args": self.op_args, "kwargs": self.op_kwargs}))
def _write_string_args(self, file: Path):
file.write_text("\n".join(map(str, self.string_args)))
def _read_result(self, path: Path):
if path.stat().st_size == 0:
return None
try:
return self.pickling_library.loads(path.read_bytes())
except ValueError as value_error:
raise DeserializingResultError() from value_error
def __deepcopy__(self, memo):
        # module objects can't be copied _at all_
memo[id(self.pickling_library)] = self.pickling_library
return super().__deepcopy__(memo)
def _execute_python_callable_in_subprocess(self, python_path: Path, tmp_dir: Path):
op_kwargs: dict[str, Any] = {k: v for k, v in self.op_kwargs.items()}
if self.templates_dict:
op_kwargs["templates_dict"] = self.templates_dict
input_path = tmp_dir / "script.in"
output_path = tmp_dir / "script.out"
string_args_path = tmp_dir / "string_args.txt"
script_path = tmp_dir / "script.py"
termination_log_path = tmp_dir / "termination.log"
self._write_args(input_path)
self._write_string_args(string_args_path)
write_python_script(
jinja_context=dict(
op_args=self.op_args,
op_kwargs=op_kwargs,
expect_airflow=self.expect_airflow,
pickling_library=self.pickling_library.__name__,
python_callable=self.python_callable.__name__,
python_callable_source=self.get_python_source(),
),
filename=os.fspath(script_path),
render_template_as_native_obj=self.dag.render_template_as_native_obj,
)
try:
execute_in_subprocess(
cmd=[
os.fspath(python_path),
os.fspath(script_path),
os.fspath(input_path),
os.fspath(output_path),
os.fspath(string_args_path),
os.fspath(termination_log_path),
]
)
except subprocess.CalledProcessError as e:
if e.returncode in self.skip_on_exit_code:
raise AirflowSkipException(f"Process exited with code {e.returncode}. Skipping.")
elif termination_log_path.exists() and termination_log_path.stat().st_size > 0:
error_msg = f"Process returned non-zero exit status {e.returncode}.\n"
with open(termination_log_path) as file:
error_msg += file.read()
raise AirflowException(error_msg) from None
else:
raise
return self._read_result(output_path)
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
return KeywordParameters.determine(self.python_callable, self.op_args, context).serializing()
class PythonVirtualenvOperator(_BasePythonVirtualenvOperator):
"""
Run a function in a virtualenv that is created and destroyed automatically.
    The function must be defined using def (with certain caveats), and not be
part of a class. All imports must happen inside the function
and no variables outside the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonVirtualenvOperator`
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtualenv
:param requirements: Either a list of requirement strings, or a (templated)
"requirements file" as specified by pip.
:param python_version: The Python version to run the virtualenv with. Note that
both 2 and 2.7 are acceptable forms.
:param use_dill: Whether to use dill to serialize
        the args and result (pickle is default). This allows more complex types
but requires you to include dill in your requirements.
:param system_site_packages: Whether to include
system_site_packages in your virtualenv.
See virtualenv documentation for more information.
:param pip_install_options: a list of pip install options when installing requirements
See 'pip install -h' for available options
:param op_args: A list of positional arguments to pass to python_callable.
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:param expect_airflow: expect Airflow to be installed in the target environment. If true, the operator
will raise warning if Airflow is not installed, and it will attempt to load Airflow
macros when starting.
:param skip_on_exit_code: If python_callable exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
"""
template_fields: Sequence[str] = tuple({"requirements"} | set(PythonOperator.template_fields))
template_ext: Sequence[str] = (".txt",)
def __init__(
self,
*,
python_callable: Callable,
requirements: None | Iterable[str] | str = None,
python_version: str | int | float | None = None,
use_dill: bool = False,
system_site_packages: bool = True,
pip_install_options: list[str] | None = None,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
skip_on_exit_code: int | Container[int] | None = None,
**kwargs,
):
if (
python_version
and str(python_version)[0] != str(sys.version_info.major)
and (op_args or op_kwargs)
):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for PythonVirtualenvOperator. Please use string_args."
f"Sys version: {sys.version_info}. Venv version: {python_version}"
)
if not shutil.which("virtualenv"):
raise AirflowException("PythonVirtualenvOperator requires virtualenv, please install it.")
if not requirements:
self.requirements: list[str] | str = []
elif isinstance(requirements, str):
self.requirements = requirements
else:
self.requirements = list(requirements)
self.python_version = python_version
self.system_site_packages = system_site_packages
self.pip_install_options = pip_install_options
super().__init__(
python_callable=python_callable,
use_dill=use_dill,
op_args=op_args,
op_kwargs=op_kwargs,
string_args=string_args,
templates_dict=templates_dict,
templates_exts=templates_exts,
expect_airflow=expect_airflow,
skip_on_exit_code=skip_on_exit_code,
**kwargs,
)
def execute_callable(self):
with TemporaryDirectory(prefix="venv") as tmp_dir:
tmp_path = Path(tmp_dir)
requirements_file_name = f"{tmp_dir}/requirements.txt"
if not isinstance(self.requirements, str):
requirements_file_contents = "\n".join(str(dependency) for dependency in self.requirements)
else:
requirements_file_contents = self.requirements
if not self.system_site_packages and self.use_dill:
requirements_file_contents += "\ndill"
with open(requirements_file_name, "w") as file:
file.write(requirements_file_contents)
prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=f"python{self.python_version}" if self.python_version else None,
system_site_packages=self.system_site_packages,
requirements_file_path=requirements_file_name,
pip_install_options=self.pip_install_options,
)
python_path = tmp_path / "bin" / "python"
result = self._execute_python_callable_in_subprocess(python_path, tmp_path)
return result
def _iter_serializable_context_keys(self):
yield from self.BASE_SERIALIZABLE_CONTEXT_KEYS
if self.system_site_packages or "apache-airflow" in self.requirements:
yield from self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
elif "pendulum" in self.requirements:
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
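# A minimal usage sketch for PythonVirtualenvOperator; "numpy" is only an example
# requirement and, as documented above, the callable keeps its imports inside:
#
#     def compute_mean():
#         import numpy as np
#         return float(np.mean([1, 2, 3]))
#
#     venv_task = PythonVirtualenvOperator(
#         task_id="compute_mean_in_venv",
#         python_callable=compute_mean,
#         requirements=["numpy"],
#         system_site_packages=False,
#     )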
class ExternalPythonOperator(_BasePythonVirtualenvOperator):
"""
Run a function in a virtualenv that is not re-created.
Reused as is without the overhead of creating the virtualenv (with certain caveats).
The function must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
    If Airflow is installed in the external environment in a different version than the version
    used by the operator, the operator will fail.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ExternalPythonOperator`
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtualenv that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtualenv
:param use_dill: Whether to use dill to serialize
        the args and result (pickle is default). This allows more complex types
but if dill is not preinstalled in your venv, the task will fail with use_dill enabled.
:param op_args: A list of positional arguments to pass to python_callable.
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:param expect_airflow: expect Airflow to be installed in the target environment. If true, the operator
will raise warning if Airflow is not installed, and it will attempt to load Airflow
macros when starting.
:param skip_on_exit_code: If python_callable exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
"""
template_fields: Sequence[str] = tuple({"python"} | set(PythonOperator.template_fields))
def __init__(
self,
*,
python: str,
python_callable: Callable,
use_dill: bool = False,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
expect_pendulum: bool = False,
skip_on_exit_code: int | Container[int] | None = None,
**kwargs,
):
if not python:
raise ValueError("Python Path must be defined in ExternalPythonOperator")
self.python = python
self.expect_pendulum = expect_pendulum
super().__init__(
python_callable=python_callable,
use_dill=use_dill,
op_args=op_args,
op_kwargs=op_kwargs,
string_args=string_args,
templates_dict=templates_dict,
templates_exts=templates_exts,
expect_airflow=expect_airflow,
skip_on_exit_code=skip_on_exit_code,
**kwargs,
)
def execute_callable(self):
python_path = Path(self.python)
if not python_path.exists():
raise ValueError(f"Python Path '{python_path}' must exists")
if not python_path.is_file():
raise ValueError(f"Python Path '{python_path}' must be a file")
if not python_path.is_absolute():
raise ValueError(f"Python Path '{python_path}' must be an absolute path.")
python_version_as_list_of_strings = self._get_python_version_from_environment()
if (
python_version_as_list_of_strings
and str(python_version_as_list_of_strings[0]) != str(sys.version_info.major)
and (self.op_args or self.op_kwargs)
):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for ExternalPythonOperator. Please use string_args."
f"Sys version: {sys.version_info}. Venv version: {python_version_as_list_of_strings}"
)
with TemporaryDirectory(prefix="tmd") as tmp_dir:
tmp_path = Path(tmp_dir)
return self._execute_python_callable_in_subprocess(python_path, tmp_path)
def _get_python_version_from_environment(self) -> list[str]:
try:
result = subprocess.check_output([self.python, "--version"], text=True)
return result.strip().split(" ")[-1].split(".")
except Exception as e:
raise ValueError(f"Error while executing {self.python}: {e}")
def _iter_serializable_context_keys(self):
yield from self.BASE_SERIALIZABLE_CONTEXT_KEYS
if self._get_airflow_version_from_target_env():
yield from self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
elif self._is_pendulum_installed_in_target_env():
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
def _is_pendulum_installed_in_target_env(self) -> bool:
try:
subprocess.check_call([self.python, "-c", "import pendulum"])
return True
except Exception as e:
if self.expect_pendulum:
self.log.warning("When checking for Pendulum installed in venv got %s", e)
self.log.warning(
"Pendulum is not properly installed in the virtualenv "
"Pendulum context keys will not be available. "
"Please Install Pendulum or Airflow in your venv to access them."
)
return False
def _get_airflow_version_from_target_env(self) -> str | None:
from airflow import __version__ as airflow_version
try:
result = subprocess.check_output(
[self.python, "-c", "from airflow import __version__; print(__version__)"],
text=True,
# Avoid Airflow logs polluting stdout.
env={**os.environ, "_AIRFLOW__AS_LIBRARY": "true"},
)
target_airflow_version = result.strip()
if target_airflow_version != airflow_version:
raise AirflowConfigException(
f"The version of Airflow installed for the {self.python}("
f"{target_airflow_version}) is different than the runtime Airflow version: "
f"{airflow_version}. Make sure your environment has the same Airflow version "
f"installed as the Airflow runtime."
)
return target_airflow_version
except Exception as e:
if self.expect_airflow:
self.log.warning("When checking for Airflow installed in venv got %s", e)
self.log.warning(
f"This means that Airflow is not properly installed by "
f"{self.python}. Airflow context keys will not be available. "
f"Please Install Airflow {airflow_version} in your environment to access them."
)
return None
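# A hedged usage sketch for ExternalPythonOperator, assuming a pre-built virtualenv
# exists at the hypothetical path "/opt/venvs/etl/bin/python":
#
#     def summarize():
#         return "done"
#
#     external = ExternalPythonOperator(
#         task_id="run_in_existing_venv",
#         python="/opt/venvs/etl/bin/python",
#         python_callable=summarize,
#     )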
def get_current_context() -> Context:
"""
Retrieve the execution context dictionary without altering user method's signature.
This is the simplest method of retrieving the execution context dictionary.
**Old style:**
.. code:: python
def my_task(**context):
ti = context["ti"]
**New style:**
.. code:: python
from airflow.operators.python import get_current_context
def my_task():
context = get_current_context()
ti = context["ti"]
    Current context will only have a value if this method was called after an operator
    has started to execute.
"""
if not _CURRENT_CONTEXT:
raise AirflowException(
"Current context was requested but no context was found! "
"Are you running within an airflow task?"
)
return _CURRENT_CONTEXT[-1]
| 34,358 | 41.841646 | 110 |
py
|
airflow
|
airflow-main/airflow/operators/trigger_dagrun.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import json
import time
from typing import TYPE_CHECKING, Any, Sequence, cast
from sqlalchemy.orm.exc import NoResultFound
from airflow.api.common.trigger_dag import trigger_dag
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagNotFound, DagRunAlreadyExists
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DagModel
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.xcom import XCom
from airflow.triggers.external_task import DagStateTrigger
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.helpers import build_airflow_url_with_query
from airflow.utils.session import provide_session
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
XCOM_EXECUTION_DATE_ISO = "trigger_execution_date_iso"
XCOM_RUN_ID = "trigger_run_id"
if TYPE_CHECKING:
from sqlalchemy.orm.session import Session
from airflow.models.taskinstancekey import TaskInstanceKey
class TriggerDagRunLink(BaseOperatorLink):
"""
Operator link for TriggerDagRunOperator.
It allows users to access DAG triggered by task using TriggerDagRunOperator.
"""
name = "Triggered DAG"
def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey) -> str:
# Fetch the correct execution date for the triggerED dag which is
# stored in xcom during execution of the triggerING task.
when = XCom.get_value(ti_key=ti_key, key=XCOM_EXECUTION_DATE_ISO)
query = {"dag_id": cast(TriggerDagRunOperator, operator).trigger_dag_id, "base_date": when}
return build_airflow_url_with_query(query)
class TriggerDagRunOperator(BaseOperator):
"""
Triggers a DAG run for a specified ``dag_id``.
:param trigger_dag_id: The dag_id to trigger (templated).
:param trigger_run_id: The run ID to use for the triggered DAG run (templated).
If not provided, a run ID will be automatically generated.
:param conf: Configuration for the DAG run (templated).
:param execution_date: Execution date for the dag (templated).
    :param reset_dag_run: Whether to clear an existing dag run if it already exists.
        This is useful when backfilling or rerunning an existing dag run.
        This only resets (not recreates) the dag run.
        Dag run conf is immutable and will not be reset on rerun of an existing dag run.
        When reset_dag_run=False and the dag run exists, DagRunAlreadyExists will be raised.
        When reset_dag_run=True and the dag run exists, the existing dag run will be cleared to rerun.
    :param wait_for_completion: Whether or not to wait for dag run completion. (default: False)
:param poke_interval: Poke interval to check dag run status when wait_for_completion=True.
(default: 60)
:param allowed_states: List of allowed states, default is ``['success']``.
:param failed_states: List of failed or dis-allowed states, default is ``None``.
:param deferrable: If waiting for completion, whether or not to defer the task until done,
default is ``False``.
"""
template_fields: Sequence[str] = (
"trigger_dag_id",
"trigger_run_id",
"execution_date",
"conf",
"wait_for_completion",
)
template_fields_renderers = {"conf": "py"}
ui_color = "#ffefeb"
operator_extra_links = [TriggerDagRunLink()]
def __init__(
self,
*,
trigger_dag_id: str,
trigger_run_id: str | None = None,
conf: dict | None = None,
execution_date: str | datetime.datetime | None = None,
reset_dag_run: bool = False,
wait_for_completion: bool = False,
poke_interval: int = 60,
allowed_states: list[str] | None = None,
failed_states: list[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_dag_id = trigger_dag_id
self.trigger_run_id = trigger_run_id
self.conf = conf
self.reset_dag_run = reset_dag_run
self.wait_for_completion = wait_for_completion
self.poke_interval = poke_interval
if allowed_states:
self.allowed_states = [DagRunState(s) for s in allowed_states]
else:
self.allowed_states = [DagRunState.SUCCESS]
if failed_states:
self.failed_states = [DagRunState(s) for s in failed_states]
else:
self.failed_states = [DagRunState.FAILED]
self._defer = deferrable
if execution_date is not None and not isinstance(execution_date, (str, datetime.datetime)):
raise TypeError(
f"Expected str or datetime.datetime type for execution_date.Got {type(execution_date)}"
)
self.execution_date = execution_date
def execute(self, context: Context):
if isinstance(self.execution_date, datetime.datetime):
parsed_execution_date = self.execution_date
elif isinstance(self.execution_date, str):
parsed_execution_date = timezone.parse(self.execution_date)
else:
parsed_execution_date = timezone.utcnow()
try:
json.dumps(self.conf)
except TypeError:
raise AirflowException("conf parameter should be JSON Serializable")
if self.trigger_run_id:
run_id = self.trigger_run_id
else:
run_id = DagRun.generate_run_id(DagRunType.MANUAL, parsed_execution_date)
try:
dag_run = trigger_dag(
dag_id=self.trigger_dag_id,
run_id=run_id,
conf=self.conf,
execution_date=parsed_execution_date,
replace_microseconds=False,
)
except DagRunAlreadyExists as e:
if self.reset_dag_run:
self.log.info("Clearing %s on %s", self.trigger_dag_id, parsed_execution_date)
# Get target dag object and call clear()
dag_model = DagModel.get_current(self.trigger_dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {self.trigger_dag_id} not found in DagModel")
dag_bag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
dag = dag_bag.get_dag(self.trigger_dag_id)
dag.clear(start_date=parsed_execution_date, end_date=parsed_execution_date)
dag_run = e.dag_run
else:
raise e
if dag_run is None:
raise RuntimeError("The dag_run should be set here!")
# Store the execution date from the dag run (either created or found above) to
# be used when creating the extra link on the webserver.
ti = context["task_instance"]
ti.xcom_push(key=XCOM_EXECUTION_DATE_ISO, value=dag_run.execution_date.isoformat())
ti.xcom_push(key=XCOM_RUN_ID, value=dag_run.run_id)
if self.wait_for_completion:
# Kick off the deferral process
if self._defer:
self.defer(
trigger=DagStateTrigger(
dag_id=self.trigger_dag_id,
states=self.allowed_states + self.failed_states,
execution_dates=[parsed_execution_date],
poll_interval=self.poke_interval,
),
method_name="execute_complete",
)
# wait for dag to complete
while True:
self.log.info(
"Waiting for %s on %s to become allowed state %s ...",
self.trigger_dag_id,
dag_run.execution_date,
self.allowed_states,
)
time.sleep(self.poke_interval)
dag_run.refresh_from_db()
state = dag_run.state
if state in self.failed_states:
raise AirflowException(f"{self.trigger_dag_id} failed with failed states {state}")
if state in self.allowed_states:
self.log.info("%s finished with allowed state %s", self.trigger_dag_id, state)
return
@provide_session
def execute_complete(self, context: Context, session: Session, event: tuple[str, dict[str, Any]]):
# This execution date is parsed from the return trigger event
provided_execution_date = event[1]["execution_dates"][0]
try:
dag_run = (
session.query(DagRun)
.filter(
DagRun.dag_id == self.trigger_dag_id, DagRun.execution_date == provided_execution_date
)
.one()
)
except NoResultFound:
raise AirflowException(
f"No DAG run found for DAG {self.trigger_dag_id} and execution date {self.execution_date}"
)
state = dag_run.state
if state in self.failed_states:
raise AirflowException(f"{self.trigger_dag_id} failed with failed state {state}")
if state in self.allowed_states:
self.log.info("%s finished with allowed state %s", self.trigger_dag_id, state)
return
raise AirflowException(
f"{self.trigger_dag_id} return {state} which is not in {self.failed_states}"
f" or {self.allowed_states}"
)
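# A minimal usage sketch for TriggerDagRunOperator, assuming a downstream DAG with
# the hypothetical id "target_dag":
#
#     trigger = TriggerDagRunOperator(
#         task_id="trigger_target",
#         trigger_dag_id="target_dag",
#         conf={"source": "upstream"},
#         wait_for_completion=True,
#         poke_interval=30,
#         allowed_states=["success"],
#         failed_states=["failed"],
#     )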
| 10,436 | 39.929412 | 106 |
py
|
airflow
|
airflow-main/airflow/operators/empty.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.models.baseoperator import BaseOperator
from airflow.utils.context import Context
class EmptyOperator(BaseOperator):
"""
Operator that does literally nothing.
It can be used to group tasks in a DAG.
The task is evaluated by the scheduler but never processed by the executor.
"""
ui_color = "#e8f7e4"
inherits_from_empty_operator = True
def execute(self, context: Context):
pass
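# A minimal usage sketch: EmptyOperator as a join point between branches;
# "branch_a" and "branch_b" are placeholders for tasks in the same DAG:
#
#     join = EmptyOperator(task_id="join", trigger_rule="none_failed_min_one_success")
#     [branch_a, branch_b] >> join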
| 1,257 | 33.944444 | 79 |
py
|
airflow
|
airflow-main/airflow/operators/subdag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated, please use :mod:`airflow.utils.task_group`.
It provides a way to nest your DAGs and so manage your levels of complexity.
"""
from __future__ import annotations
import warnings
from datetime import datetime
from enum import Enum
from sqlalchemy.orm.session import Session
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning, TaskInstanceNotFound
from airflow.models import DagRun
from airflow.models.dag import DAG, DagContext
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.context import Context
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.state import DagRunState, TaskInstanceState
from airflow.utils.types import DagRunType
class SkippedStatePropagationOptions(Enum):
"""Available options for skipped state propagation of subdag's tasks to parent dag tasks."""
ALL_LEAVES = "all_leaves"
ANY_LEAF = "any_leaf"
class SubDagOperator(BaseSensorOperator):
"""
This class is deprecated, please use `airflow.utils.task_group.TaskGroup`.
This runs a sub dag. By convention, a sub dag's dag_id
should be prefixed by its parent and a dot. As in `parent.child`.
Although SubDagOperator can occupy a pool/concurrency slot,
user can specify the mode=reschedule so that the slot will be
released periodically to avoid potential deadlock.
:param subdag: the DAG object to run as a subdag of the current DAG.
:param session: sqlalchemy session
:param conf: Configuration for the subdag
:param propagate_skipped_state: by setting this argument you can define
whether the skipped state of leaf task(s) should be propagated to the
parent dag's downstream task.
"""
ui_color = "#555"
ui_fgcolor = "#fff"
subdag: DAG
@provide_session
def __init__(
self,
*,
subdag: DAG,
session: Session = NEW_SESSION,
conf: dict | None = None,
propagate_skipped_state: SkippedStatePropagationOptions | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.subdag = subdag
self.conf = conf
self.propagate_skipped_state = propagate_skipped_state
self._validate_dag(kwargs)
self._validate_pool(session)
warnings.warn(
"""This class is deprecated. Please use `airflow.utils.task_group.TaskGroup`.""",
RemovedInAirflow3Warning,
stacklevel=4,
)
def _validate_dag(self, kwargs):
dag = kwargs.get("dag") or DagContext.get_current_dag()
if not dag:
raise AirflowException("Please pass in the `dag` param or call within a DAG context manager")
if dag.dag_id + "." + kwargs["task_id"] != self.subdag.dag_id:
raise AirflowException(
f"The subdag's dag_id should have the form '{{parent_dag_id}}.{{this_task_id}}'. "
f"Expected '{dag.dag_id}.{kwargs['task_id']}'; received '{self.subdag.dag_id}'."
)
def _validate_pool(self, session):
if self.pool:
conflicts = [t for t in self.subdag.tasks if t.pool == self.pool]
if conflicts:
# only query for pool conflicts if one may exist
pool = session.query(Pool).filter(Pool.slots == 1).filter(Pool.pool == self.pool).first()
if pool and any(t.pool == self.pool for t in self.subdag.tasks):
raise AirflowException(
f"SubDagOperator {self.task_id} and subdag task{'s' if len(conflicts) > 1 else ''} "
f"{', '.join(t.task_id for t in conflicts)} both use pool {self.pool}, "
f"but the pool only has 1 slot. The subdag tasks will never run."
)
def _get_dagrun(self, execution_date):
dag_runs = DagRun.find(
dag_id=self.subdag.dag_id,
execution_date=execution_date,
)
return dag_runs[0] if dag_runs else None
def _reset_dag_run_and_task_instances(self, dag_run: DagRun, execution_date: datetime) -> None:
"""Set task instance states to allow for execution.
The state of the DAG run will be set to RUNNING, and failed task
instances to ``None`` for scheduler to pick up.
:param dag_run: DAG run to reset.
:param execution_date: Execution date to select task instances.
"""
with create_session() as session:
dag_run.state = DagRunState.RUNNING
session.merge(dag_run)
failed_task_instances = (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == self.subdag.dag_id)
.filter(TaskInstance.execution_date == execution_date)
.filter(TaskInstance.state.in_((TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED)))
)
for task_instance in failed_task_instances:
task_instance.state = None
session.merge(task_instance)
session.commit()
def pre_execute(self, context):
super().pre_execute(context)
execution_date = context["execution_date"]
dag_run = self._get_dagrun(execution_date)
if dag_run is None:
if context["data_interval_start"] is None or context["data_interval_end"] is None:
data_interval: tuple[datetime, datetime] | None = None
else:
data_interval = (context["data_interval_start"], context["data_interval_end"])
dag_run = self.subdag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=execution_date,
state=DagRunState.RUNNING,
conf=self.conf,
external_trigger=True,
data_interval=data_interval,
)
self.log.info("Created DagRun: %s", dag_run.run_id)
else:
self.log.info("Found existing DagRun: %s", dag_run.run_id)
if dag_run.state == DagRunState.FAILED:
self._reset_dag_run_and_task_instances(dag_run, execution_date)
def poke(self, context: Context):
execution_date = context["execution_date"]
dag_run = self._get_dagrun(execution_date=execution_date)
return dag_run.state != DagRunState.RUNNING
def post_execute(self, context, result=None):
super().post_execute(context)
execution_date = context["execution_date"]
dag_run = self._get_dagrun(execution_date=execution_date)
self.log.info("Execution finished. State is %s", dag_run.state)
if dag_run.state != DagRunState.SUCCESS:
raise AirflowException(f"Expected state: SUCCESS. Actual state: {dag_run.state}")
if self.propagate_skipped_state and self._check_skipped_states(context):
self._skip_downstream_tasks(context)
def _check_skipped_states(self, context):
leaves_tis = self._get_leaves_tis(context["execution_date"])
if self.propagate_skipped_state == SkippedStatePropagationOptions.ANY_LEAF:
return any(ti.state == TaskInstanceState.SKIPPED for ti in leaves_tis)
if self.propagate_skipped_state == SkippedStatePropagationOptions.ALL_LEAVES:
return all(ti.state == TaskInstanceState.SKIPPED for ti in leaves_tis)
raise AirflowException(
f"Unimplemented SkippedStatePropagationOptions {self.propagate_skipped_state} used."
)
def _get_leaves_tis(self, execution_date):
leaves_tis = []
for leaf in self.subdag.leaves:
try:
ti = get_task_instance(
dag_id=self.subdag.dag_id, task_id=leaf.task_id, execution_date=execution_date
)
leaves_tis.append(ti)
except TaskInstanceNotFound:
continue
return leaves_tis
def _skip_downstream_tasks(self, context):
self.log.info(
"Skipping downstream tasks because propagate_skipped_state is set to %s "
"and skipped task(s) were found.",
self.propagate_skipped_state,
)
downstream_tasks = context["task"].downstream_list
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(
context["dag_run"],
context["execution_date"],
downstream_tasks,
map_index=context["ti"].map_index,
)
self.log.info("Done.")
| 9,586 | 39.451477 | 110 |
py
|
airflow
|
airflow-main/airflow/operators/email.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Sequence
from airflow.models.baseoperator import BaseOperator
from airflow.utils.context import Context
from airflow.utils.email import send_email
class EmailOperator(BaseOperator):
"""
Sends an email.
:param to: list of emails to send the email to. (templated)
:param subject: subject line for the email. (templated)
:param html_content: content of the email, html markup
is allowed. (templated)
:param files: file names to attach in email (templated)
:param cc: list of recipients to be added in CC field
:param bcc: list of recipients to be added in BCC field
:param mime_subtype: MIME sub content type
:param mime_charset: character set parameter added to the Content-Type
header.
:param conn_id: optional connection id for the configured email backend; passed through to ``send_email``.
:param custom_headers: additional headers to add to the MIME message.
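**Example** (an illustrative sketch only; the task id, addresses and file path are assumed values, not defaults of this operator):
.. code-block:: python

    notify = EmailOperator(
        task_id="notify_team",
        to="alerts@example.com",
        subject="Daily report for {{ ds }}",
        html_content="<h3>The report for {{ ds }} is attached.</h3>",
        files=["/tmp/report.csv"],
        cc=["team@example.com"],
    )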
"""
template_fields: Sequence[str] = ("to", "subject", "html_content", "files")
template_fields_renderers = {"html_content": "html"}
template_ext: Sequence[str] = (".html",)
ui_color = "#e6faf9"
def __init__(
self,
*,
to: list[str] | str,
subject: str,
html_content: str,
files: list | None = None,
cc: list[str] | str | None = None,
bcc: list[str] | str | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.to = to
self.subject = subject
self.html_content = html_content
self.files = files or []
self.cc = cc
self.bcc = bcc
self.mime_subtype = mime_subtype
self.mime_charset = mime_charset
self.conn_id = conn_id
self.custom_headers = custom_headers
def execute(self, context: Context):
send_email(
self.to,
self.subject,
self.html_content,
files=self.files,
cc=self.cc,
bcc=self.bcc,
mime_subtype=self.mime_subtype,
mime_charset=self.mime_charset,
conn_id=self.conn_id,
custom_headers=self.custom_headers,
)
| 3,086 | 33.685393 | 79 |
py
|
airflow
|
airflow-main/airflow/operators/weekday.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Iterable
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.operators.branch import BaseBranchOperator
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.weekday import WeekDay
class BranchDayOfWeekOperator(BaseBranchOperator):
"""Branches into one of two lists of tasks depending on the current day.
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BranchDayOfWeekOperator`
**Example** (with single day):
.. code-block:: python
from airflow.operators.empty import EmptyOperator
monday = EmptyOperator(task_id="monday")
other_day = EmptyOperator(task_id="other_day")
monday_check = BranchDayOfWeekOperator(
task_id="monday_check",
week_day="Monday",
use_task_logical_date=True,
follow_task_ids_if_true="monday",
follow_task_ids_if_false="other_day",
)
monday_check >> [monday, other_day]
**Example** (with :class:`~airflow.utils.weekday.WeekDay` enum):
.. code-block:: python
# import WeekDay Enum
from airflow.utils.weekday import WeekDay
from airflow.operators.empty import EmptyOperator
workday = EmptyOperator(task_id="workday")
weekend = EmptyOperator(task_id="weekend")
weekend_check = BranchDayOfWeekOperator(
task_id="weekend_check",
week_day={WeekDay.SATURDAY, WeekDay.SUNDAY},
use_task_logical_date=True,
follow_task_ids_if_true="weekend",
follow_task_ids_if_false="workday",
)
# add downstream dependencies as you would do with any branch operator
weekend_check >> [workday, weekend]
:param follow_task_ids_if_true: task id, or task ids, to follow if the criteria are met
:param follow_task_ids_if_false: task id, or task ids, to follow if the criteria are not met
:param week_day: Day of the week to check (full name). Optionally, a set
of days can also be provided. Example values:
* ``"MONDAY"``,
* ``{"Saturday", "Sunday"}``
* ``{WeekDay.TUESDAY}``
* ``{WeekDay.SATURDAY, WeekDay.SUNDAY}``
To use `WeekDay` enum, import it from `airflow.utils.weekday`
:param use_task_logical_date: If ``True``, uses the task's logical date to determine
the day of the week, which is useful for backfilling.
If ``False``, uses the system's current day of the week.
:param use_task_execution_day: deprecated parameter, same effect as `use_task_logical_date`
"""
def __init__(
self,
*,
follow_task_ids_if_true: str | Iterable[str],
follow_task_ids_if_false: str | Iterable[str],
week_day: str | Iterable[str] | WeekDay | Iterable[WeekDay],
use_task_logical_date: bool = False,
use_task_execution_day: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.follow_task_ids_if_true = follow_task_ids_if_true
self.follow_task_ids_if_false = follow_task_ids_if_false
self.week_day = week_day
self.use_task_logical_date = use_task_logical_date
if use_task_execution_day:
self.use_task_logical_date = use_task_execution_day
warnings.warn(
"Parameter ``use_task_execution_day`` is deprecated. Use ``use_task_logical_date``.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self._week_day_num = WeekDay.validate_week_day(week_day)
def choose_branch(self, context: Context) -> str | Iterable[str]:
if self.use_task_logical_date:
now = context["logical_date"]
else:
now = timezone.make_naive(timezone.utcnow(), self.dag.timezone)
if now.isoweekday() in self._week_day_num:
return self.follow_task_ids_if_true
return self.follow_task_ids_if_false
| 4,842 | 37.744 | 101 |
py
|
airflow
|
airflow-main/airflow/operators/generic_transfer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator
from airflow.utils.context import Context
class GenericTransfer(BaseOperator):
"""
Moves data from one connection to another, assuming that they both provide
the required methods in their respective hooks.
The source hook needs to expose a `get_records` method, and the destination an
`insert_rows` method.
This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database. (templated)
:param destination_table: target table. (templated)
:param source_conn_id: source connection
:param destination_conn_id: destination connection
:param preoperator: sql statement or list of statements to be
executed prior to loading the data. (templated)
:param insert_args: extra params for `insert_rows` method.
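**Example** (an illustrative sketch; the connection ids, SQL, table name and `insert_args` are assumptions and depend on the hooks involved):
.. code-block:: python

    copy_recent_orders = GenericTransfer(
        task_id="copy_recent_orders",
        sql="SELECT * FROM orders WHERE order_date = '{{ ds }}'",
        destination_table="orders_copy",
        source_conn_id="source_db",
        destination_conn_id="reporting_db",
        preoperator="DELETE FROM orders_copy WHERE order_date = '{{ ds }}'",
        insert_args={"commit_every": 1000},
    )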
"""
template_fields: Sequence[str] = ("sql", "destination_table", "preoperator")
template_ext: Sequence[str] = (
".sql",
".hql",
)
template_fields_renderers = {"preoperator": "sql"}
ui_color = "#b0f07c"
def __init__(
self,
*,
sql: str,
destination_table: str,
source_conn_id: str,
destination_conn_id: str,
preoperator: str | list[str] | None = None,
insert_args: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
self.insert_args = insert_args or {}
def execute(self, context: Context):
source_hook = BaseHook.get_hook(self.source_conn_id)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
self.log.info("Extracting data from %s", self.source_conn_id)
self.log.info("Executing: \n %s", self.sql)
get_records = getattr(source_hook, "get_records", None)
if not callable(get_records):
raise RuntimeError(
f"Hook for connection {self.source_conn_id!r} "
f"({type(source_hook).__name__}) has no `get_records` method"
)
else:
results = get_records(self.sql)
if self.preoperator:
run = getattr(destination_hook, "run", None)
if not callable(run):
raise RuntimeError(
f"Hook for connection {self.destination_conn_id!r} "
f"({type(destination_hook).__name__}) has no `run` method"
)
self.log.info("Running preoperator")
self.log.info(self.preoperator)
run(self.preoperator)
insert_rows = getattr(destination_hook, "insert_rows", None)
if not callable(insert_rows):
raise RuntimeError(
f"Hook for connection {self.destination_conn_id!r} "
f"({type(destination_hook).__name__}) has no `insert_rows` method"
)
self.log.info("Inserting rows into %s", self.destination_conn_id)
insert_rows(table=self.destination_table, rows=results, **self.insert_args)
| 4,134 | 37.64486 | 83 |
py
|
airflow
|
airflow-main/airflow/operators/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# fmt: off
"""Operators."""
from __future__ import annotations
from airflow.utils.deprecation_tools import add_deprecated_classes
__deprecated_classes = {
'bash_operator': {
'BashOperator': 'airflow.operators.bash.BashOperator',
},
'branch_operator': {
'BaseBranchOperator': 'airflow.operators.branch.BaseBranchOperator',
},
'check_operator': {
'SQLCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLCheckOperator',
'SQLIntervalCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLIntervalCheckOperator',
'SQLThresholdCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLThresholdCheckOperator',
'SQLValueCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLValueCheckOperator',
'CheckOperator': 'airflow.providers.common.sql.operators.sql.SQLCheckOperator',
'IntervalCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLIntervalCheckOperator',
'ThresholdCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLThresholdCheckOperator',
'ValueCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLValueCheckOperator',
},
'dagrun_operator': {
'TriggerDagRunLink': 'airflow.operators.trigger_dagrun.TriggerDagRunLink',
'TriggerDagRunOperator': 'airflow.operators.trigger_dagrun.TriggerDagRunOperator',
},
'docker_operator': {
'DockerOperator': 'airflow.providers.docker.operators.docker.DockerOperator',
},
'druid_check_operator': {
'DruidCheckOperator': 'airflow.providers.apache.druid.operators.druid_check.DruidCheckOperator',
},
'dummy': {
'EmptyOperator': 'airflow.operators.empty.EmptyOperator',
'DummyOperator': 'airflow.operators.empty.EmptyOperator',
},
'dummy_operator': {
'EmptyOperator': 'airflow.operators.empty.EmptyOperator',
'DummyOperator': 'airflow.operators.empty.EmptyOperator',
},
'email_operator': {
'EmailOperator': 'airflow.operators.email.EmailOperator',
},
'gcs_to_s3': {
'GCSToS3Operator': 'airflow.providers.amazon.aws.transfers.gcs_to_s3.GCSToS3Operator',
},
'google_api_to_s3_transfer': {
'GoogleApiToS3Operator': (
'airflow.providers.amazon.aws.transfers.google_api_to_s3.GoogleApiToS3Operator'
),
'GoogleApiToS3Transfer': (
'airflow.providers.amazon.aws.transfers.google_api_to_s3.GoogleApiToS3Operator'
),
},
'hive_operator': {
'HiveOperator': 'airflow.providers.apache.hive.operators.hive.HiveOperator',
},
'hive_stats_operator': {
'HiveStatsCollectionOperator': (
'airflow.providers.apache.hive.operators.hive_stats.HiveStatsCollectionOperator'
),
},
'hive_to_druid': {
'HiveToDruidOperator': 'airflow.providers.apache.druid.transfers.hive_to_druid.HiveToDruidOperator',
'HiveToDruidTransfer': 'airflow.providers.apache.druid.transfers.hive_to_druid.HiveToDruidOperator',
},
'hive_to_mysql': {
'HiveToMySqlOperator': 'airflow.providers.apache.hive.transfers.hive_to_mysql.HiveToMySqlOperator',
'HiveToMySqlTransfer': 'airflow.providers.apache.hive.transfers.hive_to_mysql.HiveToMySqlOperator',
},
'hive_to_samba_operator': {
'HiveToSambaOperator': 'airflow.providers.apache.hive.transfers.hive_to_samba.HiveToSambaOperator',
},
'http_operator': {
'SimpleHttpOperator': 'airflow.providers.http.operators.http.SimpleHttpOperator',
},
'jdbc_operator': {
'JdbcOperator': 'airflow.providers.jdbc.operators.jdbc.JdbcOperator',
},
'latest_only_operator': {
'LatestOnlyOperator': 'airflow.operators.latest_only.LatestOnlyOperator',
},
'mssql_operator': {
'MsSqlOperator': 'airflow.providers.microsoft.mssql.operators.mssql.MsSqlOperator',
},
'mssql_to_hive': {
'MsSqlToHiveOperator': 'airflow.providers.apache.hive.transfers.mssql_to_hive.MsSqlToHiveOperator',
'MsSqlToHiveTransfer': 'airflow.providers.apache.hive.transfers.mssql_to_hive.MsSqlToHiveOperator',
},
'mysql_operator': {
'MySqlOperator': 'airflow.providers.mysql.operators.mysql.MySqlOperator',
},
'mysql_to_hive': {
'MySqlToHiveOperator': 'airflow.providers.apache.hive.transfers.mysql_to_hive.MySqlToHiveOperator',
'MySqlToHiveTransfer': 'airflow.providers.apache.hive.transfers.mysql_to_hive.MySqlToHiveOperator',
},
'oracle_operator': {
'OracleOperator': 'airflow.providers.oracle.operators.oracle.OracleOperator',
},
'papermill_operator': {
'PapermillOperator': 'airflow.providers.papermill.operators.papermill.PapermillOperator',
},
'pig_operator': {
'PigOperator': 'airflow.providers.apache.pig.operators.pig.PigOperator',
},
'postgres_operator': {
'Mapping': 'airflow.providers.postgres.operators.postgres.Mapping',
'PostgresOperator': 'airflow.providers.postgres.operators.postgres.PostgresOperator',
},
'presto_check_operator': {
'SQLCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLCheckOperator',
'SQLIntervalCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLIntervalCheckOperator',
'SQLValueCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLValueCheckOperator',
'PrestoCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLCheckOperator',
'PrestoIntervalCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLIntervalCheckOperator',
'PrestoValueCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLValueCheckOperator',
},
'presto_to_mysql': {
'PrestoToMySqlOperator': 'airflow.providers.mysql.transfers.presto_to_mysql.PrestoToMySqlOperator',
'PrestoToMySqlTransfer': 'airflow.providers.mysql.transfers.presto_to_mysql.PrestoToMySqlOperator',
},
'python_operator': {
'BranchPythonOperator': 'airflow.operators.python.BranchPythonOperator',
'PythonOperator': 'airflow.operators.python.PythonOperator',
'PythonVirtualenvOperator': 'airflow.operators.python.PythonVirtualenvOperator',
'ShortCircuitOperator': 'airflow.operators.python.ShortCircuitOperator',
},
'redshift_to_s3_operator': {
'RedshiftToS3Operator': 'airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator',
'RedshiftToS3Transfer': 'airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator',
},
's3_file_transform_operator': {
'S3FileTransformOperator': (
'airflow.providers.amazon.aws.operators.s3_file_transform.S3FileTransformOperator'
),
},
's3_to_hive_operator': {
'S3ToHiveOperator': 'airflow.providers.apache.hive.transfers.s3_to_hive.S3ToHiveOperator',
'S3ToHiveTransfer': 'airflow.providers.apache.hive.transfers.s3_to_hive.S3ToHiveOperator',
},
's3_to_redshift_operator': {
'S3ToRedshiftOperator': 'airflow.providers.amazon.aws.transfers.s3_to_redshift.S3ToRedshiftOperator',
'S3ToRedshiftTransfer': 'airflow.providers.amazon.aws.transfers.s3_to_redshift.S3ToRedshiftOperator',
},
'slack_operator': {
'SlackAPIOperator': 'airflow.providers.slack.operators.slack.SlackAPIOperator',
'SlackAPIPostOperator': 'airflow.providers.slack.operators.slack.SlackAPIPostOperator',
},
'sql': {
'BaseSQLOperator': 'airflow.providers.common.sql.operators.sql.BaseSQLOperator',
'BranchSQLOperator': 'airflow.providers.common.sql.operators.sql.BranchSQLOperator',
'SQLCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLCheckOperator',
'SQLColumnCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLColumnCheckOperator',
'SQLIntervalCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLIntervalCheckOperator',
'SQLTableCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLTableCheckOperator',
'SQLThresholdCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLThresholdCheckOperator',
'SQLValueCheckOperator': 'airflow.providers.common.sql.operators.sql.SQLValueCheckOperator',
'_convert_to_float_if_possible': (
'airflow.providers.common.sql.operators.sql._convert_to_float_if_possible'
),
'parse_boolean': 'airflow.providers.common.sql.operators.sql.parse_boolean',
},
'sql_branch_operator': {
'BranchSQLOperator': 'airflow.providers.common.sql.operators.sql.BranchSQLOperator',
'BranchSqlOperator': 'airflow.providers.common.sql.operators.sql.BranchSQLOperator',
},
'sqlite_operator': {
'SqliteOperator': 'airflow.providers.sqlite.operators.sqlite.SqliteOperator',
},
'subdag_operator': {
'SkippedStatePropagationOptions': 'airflow.operators.subdag.SkippedStatePropagationOptions',
'SubDagOperator': 'airflow.operators.subdag.SubDagOperator',
},
}
add_deprecated_classes(__deprecated_classes, __name__)
| 9,917 | 49.861538 | 109 |
py
|
airflow
|
airflow-main/airflow/operators/branch.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Branching operators."""
from __future__ import annotations
from typing import Iterable
from airflow.models.baseoperator import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.utils.context import Context
class BaseBranchOperator(BaseOperator, SkipMixin):
"""
A base class for creating operators with branching functionality, like the BranchPythonOperator.
Users should create a subclass from this operator and implement the function
`choose_branch(self, context)`. This should run whatever business logic
is needed to determine the branch, and return either the task_id for
a single task (as a str) or a list of task_ids.
The operator will continue with the returned task_id(s), and all other
tasks directly downstream of this operator will be skipped.
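A minimal sketch of a subclass (the task ids and the weekend rule are assumptions used purely for illustration):
.. code-block:: python

    class WeekendBranchOperator(BaseBranchOperator):
        def choose_branch(self, context):
            # Route to a lighter branch on weekends, based on the run's logical date.
            if context["logical_date"].isoweekday() >= 6:
                return "weekend_task"
            return ["weekday_task", "metrics_task"]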
"""
def choose_branch(self, context: Context) -> str | Iterable[str]:
"""
Abstract method to choose which branch to run.
Subclasses should implement this, running whatever logic is
necessary to choose a branch and returning a task_id or list of
task_ids.
:param context: Context dictionary as passed to execute()
"""
raise NotImplementedError
def execute(self, context: Context):
branches_to_execute = self.choose_branch(context)
self.skip_all_except(context["ti"], branches_to_execute)
return branches_to_execute
| 2,223 | 38.017544 | 99 |
py
|
airflow
|
airflow-main/airflow/operators/smooth.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.models.baseoperator import BaseOperator
from airflow.utils.context import Context
class SmoothOperator(BaseOperator):
"""Operator that does nothing, it logs a YouTube link to Sade song "Smooth Operator"."""
ui_color = "#e8f7e4"
yt_link: str = "https://www.youtube.com/watch?v=4TYv2PhG89A"
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def execute(self, context: Context):
self.log.info("Enjoy Sade - Smooth Operator: %s", self.yt_link)
| 1,332 | 37.085714 | 92 |
py
|
airflow
|
airflow-main/airflow/operators/bash.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import shutil
import warnings
from functools import cached_property
from typing import Container, Sequence
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.hooks.subprocess import SubprocessHook
from airflow.models.baseoperator import BaseOperator
from airflow.utils.context import Context
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
r"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
will also be pushed to an XCom when the bash command completes.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed. (templated)
:param env: If env is not None, it must be a dict that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:param append_env: If False (default), uses only the environment variables passed in ``env``
and does not inherit the current process environment. If True, inherits the environment variables
from the current process, and the variables passed by the user either update the existing
inherited environment variables or are appended to them.
:param output_encoding: Output encoding of bash command
:param skip_on_exit_code: If task exits with this exit code, leave the task
in ``skipped`` state (default: 99). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param cwd: Working directory to execute the command in.
If None (default), the command is run in a temporary directory.
Airflow will evaluate the exit code of the bash command. In general, a non-zero exit code will result in
task failure and zero will result in task success.
Exit code ``99`` (or another set in ``skip_on_exit_code``)
will throw an :class:`airflow.exceptions.AirflowSkipException`, which will leave the task in ``skipped``
state. You can have all non-zero exit codes be treated as a failure by setting ``skip_on_exit_code=None``.
.. list-table::
:widths: 25 25
:header-rows: 1
* - Exit code
- Behavior
* - 0
- success
* - `skip_on_exit_code` (default: 99)
- raise :class:`airflow.exceptions.AirflowSkipException`
* - otherwise
- raise :class:`airflow.exceptions.AirflowException`
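For example, the following sketch (the task id, URL and exit code are assumptions) treats curl's
exit code ``22`` (HTTP error) as a skip rather than a failure:
.. code-block:: python

    fetch_report = BashOperator(
        task_id="fetch_report",
        bash_command="curl --fail https://example.com/report.csv -o /tmp/report.csv",
        skip_on_exit_code=22,
    )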
.. note::
Airflow will not recognize a non-zero exit code unless the whole shell exits with a non-zero exit
code. This can be an issue if the non-zero exit arises from a sub-command. The easiest way of
addressing this is to prefix the command with ``set -e;``
Example:
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
.. note::
Add a space after the script name when directly calling a ``.sh`` script with the
``bash_command`` argument -- for example ``bash_command="my_script.sh "``. This
is because Airflow tries to load this file and process it as a Jinja template because
it ends with ``.sh``, which will likely not be what most users want.
.. warning::
Care should be taken with "user" input or when using Jinja templates in the
``bash_command``, as this bash operator does not perform any escaping or
sanitization of the command.
This applies mostly to using "dag_run" conf, as that can be submitted via
users in the Web UI. Most of the default template variables are not at
risk.
For example, do **not** do this:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command='echo "Here is the message: \'{{ dag_run.conf["message"] if dag_run else "" }}\'"',
)
Instead, you should pass this via the ``env`` kwarg and use double-quotes
inside the bash_command, as below:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command="echo \"here is the message: '$message'\"",
env={"message": '{{ dag_run.conf["message"] if dag_run else "" }}'},
)
"""
template_fields: Sequence[str] = ("bash_command", "env")
template_fields_renderers = {"bash_command": "bash", "env": "json"}
template_ext: Sequence[str] = (
".sh",
".bash",
)
ui_color = "#f0ede4"
def __init__(
self,
*,
bash_command: str,
env: dict[str, str] | None = None,
append_env: bool = False,
output_encoding: str = "utf-8",
skip_exit_code: int | None = None,
skip_on_exit_code: int | Container[int] | None = 99,
cwd: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
if skip_exit_code is not None:
warnings.warn(
"skip_exit_code is deprecated. Please use skip_on_exit_code", DeprecationWarning, stacklevel=2
)
skip_on_exit_code = skip_exit_code
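# Normalize skip_on_exit_code into a container: a single (truthy) exit code becomes a
# one-element list, while ``None`` becomes an empty list so that no exit code triggers a skip.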
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code
else []
)
self.cwd = cwd
self.append_env = append_env
@cached_property
def subprocess_hook(self):
"""Returns hook for running the bash command."""
return SubprocessHook()
def get_env(self, context):
"""Builds the set of environment variables to be exposed for the bash command."""
system_env = os.environ.copy()
env = self.env
if env is None:
env = system_env
else:
if self.append_env:
system_env.update(env)
env = system_env
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug(
"Exporting env vars: %s",
" ".join(f"{k}={v!r}" for k, v in airflow_context_vars.items()),
)
env.update(airflow_context_vars)
return env
def execute(self, context: Context):
bash_path = shutil.which("bash") or "bash"
if self.cwd is not None:
if not os.path.exists(self.cwd):
raise AirflowException(f"Can not find the cwd: {self.cwd}")
if not os.path.isdir(self.cwd):
raise AirflowException(f"The cwd {self.cwd} must be a directory")
env = self.get_env(context)
result = self.subprocess_hook.run_command(
command=[bash_path, "-c", self.bash_command],
env=env,
output_encoding=self.output_encoding,
cwd=self.cwd,
)
if self.skip_on_exit_code is not None and result.exit_code in self.skip_on_exit_code:
raise AirflowSkipException(f"Bash command returned exit code {self.skip_on_exit_code}. Skipping.")
elif result.exit_code != 0:
raise AirflowException(
f"Bash command failed. The command returned a non-zero exit code {result.exit_code}."
)
return result.output
def on_kill(self) -> None:
self.subprocess_hook.send_sigterm()
| 8,567 | 38.483871 | 110 |
py
|
airflow
|
airflow-main/airflow/task/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/task/task_runner/base_task_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base task runner."""
from __future__ import annotations
import os
import subprocess
import threading
from airflow.jobs.local_task_job_runner import LocalTaskJobRunner
from airflow.utils.dag_parsing_context import _airflow_parsing_context_manager
from airflow.utils.platform import IS_WINDOWS
if not IS_WINDOWS:
# ignored to avoid flake complaining on Linux
from pwd import getpwnam # noqa
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
PYTHONPATH_VAR = "PYTHONPATH"
class BaseTaskRunner(LoggingMixin):
"""
Runs Airflow task instances via the CLI.
It invokes the `airflow tasks run` command with raw mode enabled in a subprocess.
:param job_runner: The LocalTaskJobRunner associated with the task runner
"""
def __init__(self, job_runner: LocalTaskJobRunner):
self.job_runner = job_runner
super().__init__(job_runner.task_instance)
self._task_instance = job_runner.task_instance
popen_prepend = []
if self._task_instance.run_as_user:
self.run_as_user: str | None = self._task_instance.run_as_user
else:
try:
self.run_as_user = conf.get("core", "default_impersonation")
except AirflowConfigException:
self.run_as_user = None
# Add sudo commands to change user if we need to. Needed to handle SubDagOperator
# case using a SequentialExecutor.
self.log.debug("Planning to run as the %s user", self.run_as_user)
if self.run_as_user and (self.run_as_user != getuser()):
# We want to include any environment variables now, as we won't
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
# might not be able to run the cmds to get credentials
cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)
# Give ownership of file to user; only they can read and write
subprocess.check_call(["sudo", "chown", self.run_as_user, cfg_path], close_fds=True)
# propagate PYTHONPATH environment variable
pythonpath_value = os.environ.get(PYTHONPATH_VAR, "")
popen_prepend = ["sudo", "-E", "-H", "-u", self.run_as_user]
if pythonpath_value:
popen_prepend.append(f"{PYTHONPATH_VAR}={pythonpath_value}")
else:
# Always provide a copy of the configuration file settings. Since
# we are running as the same user, and can pass through environment
# variables then we don't need to include those in the config copy
# - the runner can read/execute those values as it needs
cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)
self._cfg_path = cfg_path
self._command = popen_prepend + self._task_instance.command_as_list(
raw=True,
pickle_id=self.job_runner.pickle_id,
mark_success=self.job_runner.mark_success,
job_id=self.job_runner.job.id,
pool=self.job_runner.pool,
cfg_path=cfg_path,
)
self.process = None
def _read_task_logs(self, stream):
while True:
line = stream.readline()
if isinstance(line, bytes):
line = line.decode("utf-8")
if not line:
break
self.log.info(
"Job %s: Subtask %s %s",
self._task_instance.job_id,
self._task_instance.task_id,
line.rstrip("\n"),
)
def run_command(self, run_with=None) -> subprocess.Popen:
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:return: the process that was run
"""
run_with = run_with or []
full_cmd = run_with + self._command
self.log.info("Running on host: %s", get_hostname())
self.log.info("Running: %s", full_cmd)
with _airflow_parsing_context_manager(
dag_id=self._task_instance.dag_id,
task_id=self._task_instance.task_id,
):
if IS_WINDOWS:
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
)
else:
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid,
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc
def start(self):
"""Start running the task instance in a subprocess."""
raise NotImplementedError()
def return_code(self, timeout: float = 0.0) -> int | None:
"""
Extract the return code.
:return: The return code associated with running the task instance or
None if the task is not yet done.
"""
raise NotImplementedError()
def terminate(self) -> None:
"""Force kill the running task instance."""
raise NotImplementedError()
def on_finish(self) -> None:
"""A callback that should be called when this is done running."""
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(["sudo", "rm", self._cfg_path], close_fds=True)
else:
os.remove(self._cfg_path)
def get_process_pid(self) -> int:
"""Get the process pid."""
if hasattr(self, "process") and self.process is not None and hasattr(self.process, "pid"):
# this is a backwards compatibility for custom task runners that were used before
# the process.pid attribute was accessed by local_task_job directly but since process
# was either subprocess.Popen or psutil.Process it was not possible to have it really
# common in the base task runner - instead we changed it to use get_process_pid method and leave
# it to the task_runner to implement it
return self.process.pid
raise NotImplementedError()
| 7,824 | 38.520202 | 108 |
py
|
airflow
|
airflow-main/airflow/task/task_runner/cgroup_task_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task runner for cgroup to run Airflow task."""
from __future__ import annotations
import datetime
import os
import uuid
import psutil
from cgroupspy import trees
from airflow.jobs.local_task_job_runner import LocalTaskJobRunner
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.operator_resources import Resources
from airflow.utils.platform import getuser
from airflow.utils.process_utils import reap_process_group
class CgroupTaskRunner(BaseTaskRunner):
"""
Runs the raw Airflow task in a cgroup container, with containment for memory and CPU.
It uses the resource requirements defined in the task to construct the settings for the cgroup.
Cgroups must be mounted first, otherwise CgroupTaskRunner will not be able to work.
The cgroup-bin Ubuntu package must be installed to use the cgexec command.
Note that this task runner will only work if the Airflow user has root privileges,
e.g. if the airflow user is called `airflow` then the following entries (or even
less restrictive ones) are needed in the sudoers file (replacing
/CGROUPS_FOLDER with your system's cgroups folder, e.g. '/sys/fs/cgroup/'):
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/* *
"""
def __init__(self, job_runner: LocalTaskJobRunner):
super().__init__(job_runner=job_runner)
self.process = None
self._finished_running = False
self._cpu_shares = None
self._mem_mb_limit = None
self.mem_cgroup_name = None
self.cpu_cgroup_name = None
self._created_cpu_cgroup = False
self._created_mem_cgroup = False
self._cur_user = getuser()
def _create_cgroup(self, path) -> trees.Node:
"""
Create the specified cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
# node.name is encoded to bytes:
# https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L64
name_to_node = {x.name.decode(): x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path.decode())
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists", path_element, node.path.decode()
)
node = name_to_node[path_element]
return node
def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name.decode(): x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name.decode())
def start(self):
# Use bash if it's already in a cgroup
cgroups = self._get_cgroup_names()
if (cgroups.get("cpu") and cgroups.get("cpu") != "/") or (
cgroups.get("memory") and cgroups.get("memory") != "/"
):
self.log.debug(
"Already running in a cgroup (cpu: %s memory: %s) so not creating another one",
cgroups.get("cpu"),
cgroups.get("memory"),
)
self.process = self.run_command()
return
# Create a unique cgroup name
cgroup_name = f"airflow/{datetime.datetime.utcnow().strftime('%Y-%m-%d')}/{str(uuid.uuid4())}"
self.mem_cgroup_name = f"memory/{cgroup_name}"
self.cpu_cgroup_name = f"cpu/{cgroup_name}"
# Get the resource requirements from the task
task = self._task_instance.task
resources = task.resources if task.resources is not None else Resources()
cpus = resources.cpus.qty
self._cpu_shares = cpus * 1024
self._mem_mb_limit = resources.ram.qty
# Create the memory cgroup
self.mem_cgroup_node = self._create_cgroup(self.mem_cgroup_name)
self._created_mem_cgroup = True
if self._mem_mb_limit > 0:
self.log.debug("Setting %s with %s MB of memory", self.mem_cgroup_name, self._mem_mb_limit)
self.mem_cgroup_node.controller.limit_in_bytes = self._mem_mb_limit * 1024 * 1024
# Create the CPU cgroup
cpu_cgroup_node = self._create_cgroup(self.cpu_cgroup_name)
self._created_cpu_cgroup = True
if self._cpu_shares > 0:
self.log.debug("Setting %s with %s CPU shares", self.cpu_cgroup_name, self._cpu_shares)
cpu_cgroup_node.controller.shares = self._cpu_shares
# Start the process w/ cgroups
self.log.debug("Starting task process with cgroups cpu,memory: %s", cgroup_name)
self.process = self.run_command(["cgexec", "-g", f"cpu,memory:{cgroup_name}"])
def return_code(self, timeout: float = 0) -> int | None:
if self.process is None:
return None
return_code = self.process.poll()
# TODO(plypaul) Monitoring the control file in the cgroup fs is better than
# checking the return code here. The PR to use this is here:
# https://github.com/plypaul/airflow/blob/e144e4d41996300ffa93947f136eab7785b114ed/airflow/contrib/task_runner/cgroup_task_runner.py#L43
# but there were some issues installing the python butter package and
# libseccomp-dev on some hosts for some reason.
# I wasn't able to track down the root cause of the package install failures, but
# we might want to revisit that approach at some other point.
if return_code == 137:
self.log.error(
"Task failed with return code of 137. This may indicate "
"that it was killed due to excessive memory usage. "
"Please consider optimizing your task or using the "
"resources argument to reserve more memory for your task"
)
return return_code
def terminate(self):
if self.process and psutil.pid_exists(self.process.pid):
reap_process_group(self.process.pid, self.log)
def _log_memory_usage(self, mem_cgroup_node):
def byte_to_gb(num_bytes, precision=2):
return round(num_bytes / (1024 * 1024 * 1024), precision)
with open(mem_cgroup_node.full_path + "/memory.max_usage_in_bytes") as f:
max_usage_in_bytes = int(f.read().strip())
used_gb = byte_to_gb(max_usage_in_bytes)
limit_gb = byte_to_gb(mem_cgroup_node.controller.limit_in_bytes)
self.log.info(
"Memory max usage of the task is %s GB, while the memory limit is %s GB", used_gb, limit_gb
)
if max_usage_in_bytes >= mem_cgroup_node.controller.limit_in_bytes:
self.log.info(
"This task has reached the memory limit allocated by Airflow worker. "
"If it failed, try to optimize the task or reserve more memory."
)
def on_finish(self):
# Let the OOM watcher thread know we're done to avoid false OOM alarms
self._finished_running = True
# Clean up the cgroups
if self._created_mem_cgroup:
self._log_memory_usage(self.mem_cgroup_node)
self._delete_cgroup(self.mem_cgroup_name)
if self._created_cpu_cgroup:
self._delete_cgroup(self.cpu_cgroup_name)
super().on_finish()
@staticmethod
def _get_cgroup_names() -> dict[str, str]:
"""
Get the mapping between the subsystem name and the cgroup name.
:return: a mapping between the subsystem name to the cgroup name
"""
with open("/proc/self/cgroup") as file:
lines = file.readlines()
subsystem_cgroup_map = {}
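# Each line of /proc/self/cgroup has the form "<hierarchy-id>:<subsystem>:<cgroup-path>",
# e.g. "4:memory:/user.slice" (the example value is illustrative).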
for line in lines:
line_split = line.rstrip().split(":")
subsystem = line_split[1]
group_name = line_split[2]
subsystem_cgroup_map[subsystem] = group_name
return subsystem_cgroup_map
def get_process_pid(self) -> int:
if self.process is None:
raise RuntimeError("Process is not started yet")
return self.process.pid
| 10,559 | 42.45679 | 144 |
py
|
airflow
|
airflow-main/airflow/task/task_runner/standard_task_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Standard task runner."""
from __future__ import annotations
import logging
import os
import psutil
from setproctitle import setproctitle
from airflow.jobs.local_task_job_runner import LocalTaskJobRunner
from airflow.models.taskinstance import TaskReturnCode
from airflow.settings import CAN_FORK
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.dag_parsing_context import _airflow_parsing_context_manager
from airflow.utils.process_utils import reap_process_group, set_new_process_group
class StandardTaskRunner(BaseTaskRunner):
"""Standard runner for all tasks."""
def __init__(self, job_runner: LocalTaskJobRunner):
super().__init__(job_runner=job_runner)
self._rc = None
self.dag = self._task_instance.task.dag
def start(self):
if CAN_FORK and not self.run_as_user:
self.process = self._start_by_fork()
else:
self.process = self._start_by_exec()
def _start_by_exec(self) -> psutil.Process:
subprocess = self.run_command()
self.process = psutil.Process(subprocess.pid)
return self.process
def _start_by_fork(self):
pid = os.fork()
if pid:
self.log.info("Started process %d to run task", pid)
return psutil.Process(pid)
else:
# Start a new process group
set_new_process_group()
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
from airflow import settings
from airflow.cli.cli_parser import get_parser
from airflow.sentry import Sentry
# Force a new SQLAlchemy session. We can't share open DB handles
# between process. The cli code will re-create this as part of its
# normal startup
settings.engine.pool.dispose()
settings.engine.dispose()
parser = get_parser()
# [1:] - remove "airflow" from the start of the command
args = parser.parse_args(self._command[1:])
# We prefer the job_id passed on the command-line because at this time, the
# task instance may not have been updated.
job_id = getattr(args, "job_id", self._task_instance.job_id)
self.log.info("Running: %s", self._command)
self.log.info("Job %s: Subtask %s", job_id, self._task_instance.task_id)
proc_title = "airflow task runner: {0.dag_id} {0.task_id} {0.execution_date_or_run_id}"
if job_id is not None:
proc_title += " {0.job_id}"
setproctitle(proc_title.format(args))
return_code = 0
try:
with _airflow_parsing_context_manager(
dag_id=self._task_instance.dag_id,
task_id=self._task_instance.task_id,
):
ret = args.func(args, dag=self.dag)
return_code = 0
if isinstance(ret, TaskReturnCode):
return_code = ret.value
except Exception as exc:
return_code = 1
self.log.error(
"Failed to execute job %s for task %s (%s; %r)",
job_id,
self._task_instance.task_id,
exc,
os.getpid(),
)
except SystemExit as sys_ex:
# Someone called sys.exit() in the fork - mistakenly. You should not run sys.exit() in
# the fork because you can mistakenly execute atexit handlers that were set by the parent
# process before the fork happened
return_code = sys_ex.code
except BaseException:
# While we also want to handle BaseException here, we do not want to log it (this
# is the default behaviour anyway). Setting the return code here to 2 indicates that
# this has happened.
return_code = 2
finally:
try:
# Explicitly flush any pending exception to Sentry and logging if enabled
Sentry.flush()
logging.shutdown()
except BaseException:
# also make sure to silently ignore ALL POSSIBLE exceptions thrown in the flush/shutdown,
# otherwise os._exit() might never be called. We could have used `except:` but
# except BaseException is more explicit (and linters do not comply).
pass
# We run os._exit() making sure it is not run within the `finally` clause.
# We cannot run os._exit() in finally clause, because during finally clause processing, the
# Exception handled is held in memory as well as stack trace and possibly some objects that
# might need to be finalized. Running os._exit() inside the `finally` clause might cause effects
# similar to https://github.com/apache/airflow/issues/22404. There Temporary file has not been
# deleted at os._exit()
os._exit(return_code)
def return_code(self, timeout: float = 0) -> int | None:
# We call this multiple times, but we can only wait on the process once
if self._rc is not None or not self.process:
return self._rc
try:
self._rc = self.process.wait(timeout=timeout)
self.process = None
except psutil.TimeoutExpired:
pass
return self._rc
def terminate(self):
if self.process is None:
return
# Reap the child process - it may already be finished
_ = self.return_code(timeout=0)
if self.process and self.process.is_running():
rcs = reap_process_group(self.process.pid, self.log)
self._rc = rcs.get(self.process.pid)
self.process = None
if self._rc is None:
# Something else reaped it before we had a chance, so let's just "guess" at an error code.
self._rc = -9
if self._rc == -9:
# If either we or psutil gives out a -9 return code, it likely means
# an OOM happened
self.log.error(
"Job %s was killed before it finished (likely due to running out of memory)",
self._task_instance.job_id,
)
def get_process_pid(self) -> int:
if self.process is None:
raise RuntimeError("Process is not started yet")
return self.process.pid
| 7,470 | 40.276243 | 109 |
py
|
airflow
|
airflow-main/airflow/task/task_runner/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.jobs.local_task_job_runner import LocalTaskJobRunner
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
_TASK_RUNNER_NAME = conf.get("core", "TASK_RUNNER")
STANDARD_TASK_RUNNER = "StandardTaskRunner"
CGROUP_TASK_RUNNER = "CgroupTaskRunner"
CORE_TASK_RUNNERS = {
STANDARD_TASK_RUNNER: "airflow.task.task_runner.standard_task_runner.StandardTaskRunner",
CGROUP_TASK_RUNNER: "airflow.task.task_runner.cgroup_task_runner.CgroupTaskRunner",
}
def get_task_runner(local_task_job_runner: LocalTaskJobRunner) -> BaseTaskRunner:
"""
Get the task runner that can be used to run with the given job runner.
:param local_task_job_runner: The LocalTaskJobRunner associated with the TaskInstance
that needs to be executed.
:return: The task runner to use to run the task.
"""
if _TASK_RUNNER_NAME in CORE_TASK_RUNNERS:
log.debug("Loading core task runner: %s", _TASK_RUNNER_NAME)
task_runner_class = import_string(CORE_TASK_RUNNERS[_TASK_RUNNER_NAME])
else:
log.debug("Loading task runner from custom path: %s", _TASK_RUNNER_NAME)
try:
task_runner_class = import_string(_TASK_RUNNER_NAME)
except ImportError:
raise AirflowConfigException(
f'The task runner could not be loaded. Please check "task_runner" key in "core" section. '
f'Current value: "{_TASK_RUNNER_NAME}".'
)
task_runner = task_runner_class(local_task_job_runner)
return task_runner
| 2,552 | 38.890625 | 106 |
py
|
airflow
|
airflow-main/airflow/triggers/base.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import abc
from typing import Any, AsyncIterator
from airflow.utils.log.logging_mixin import LoggingMixin
class BaseTrigger(abc.ABC, LoggingMixin):
"""
Base class for all triggers.
A trigger has two contexts it can exist in:
- Inside an Operator, when it's passed to TaskDeferred
- Actively running in a trigger worker
We use the same class for both situations, and rely on all Trigger classes
to be able to return the arguments (possible to encode with Airflow-JSON) that will
let them be re-instantiated elsewhere.
"""
def __init__(self, **kwargs):
# these values are set by triggerer when preparing to run the instance
# when run, they are injected into logger record.
self.task_instance = None
self.trigger_id = None
def _set_context(self, context):
"""Part of LoggingMixin and used mainly for configuration of task logging; not used for triggers."""
raise NotImplementedError
@abc.abstractmethod
def serialize(self) -> tuple[str, dict[str, Any]]:
"""
Returns the information needed to reconstruct this Trigger.
:return: Tuple of (class path, keyword arguments needed to re-instantiate).
"""
raise NotImplementedError("Triggers must implement serialize()")
@abc.abstractmethod
async def run(self) -> AsyncIterator[TriggerEvent]:
"""
Runs the trigger in an asynchronous context.
The trigger should yield an Event whenever it wants to fire off
an event, and return None if it is finished. Single-event triggers
should thus yield and then immediately return.
If it yields, it is likely that it will be resumed very quickly,
but it may not be (e.g. if the workload is being moved to another
triggerer process, or a multi-event trigger was being used for a
single-event task defer).
In either case, Trigger classes should assume they will be persisted,
and then rely on cleanup() being called when they are no longer needed.
"""
raise NotImplementedError("Triggers must implement run()")
yield # To convince Mypy this is an async iterator.
async def cleanup(self) -> None:
"""
Cleanup the trigger.
Called when the trigger is no longer needed, and it's being removed
from the active triggerer process.
        This method follows the async/await pattern to allow the cleanup to run
        in the triggerer's main event loop. Exceptions raised by the cleanup method
        are ignored, so if you would like to be able to debug them and be notified
        that the cleanup method failed, you should wrap your code in a try/except block
        and handle it appropriately (in an async-compatible way).
"""
def __repr__(self) -> str:
classpath, kwargs = self.serialize()
kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items())
return f"<{classpath} {kwargs_str}>"
class TriggerEvent:
"""
Something that a trigger can fire when its conditions are met.
Events must have a uniquely identifying value that would be the same
wherever the trigger is run; this is to ensure that if the same trigger
is being run in two locations (for HA reasons) that we can deduplicate its
events.
"""
def __init__(self, payload: Any):
self.payload = payload
def __repr__(self) -> str:
return f"TriggerEvent<{self.payload!r}>"
def __eq__(self, other):
if isinstance(other, TriggerEvent):
return other.payload == self.payload
return False
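# A minimal sketch of a custom trigger built on the interfaces above. The class and its
# "seconds" argument are hypothetical; the pattern is what BaseTrigger requires:
# serialize() returns the constructor kwargs, run() yields a TriggerEvent.
import asyncio
class WaitNSecondsTrigger(BaseTrigger):
    def __init__(self, seconds: float = 5.0):
        super().__init__()
        self.seconds = seconds
    def serialize(self) -> tuple[str, dict[str, Any]]:
        # The classpath must be importable by the triggerer process.
        return ("my_plugin.triggers.WaitNSecondsTrigger", {"seconds": self.seconds})
    async def run(self) -> AsyncIterator[TriggerEvent]:
        await asyncio.sleep(self.seconds)
        yield TriggerEvent({"waited": self.seconds})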
| 4,471 | 36.579832 | 108 |
py
|
airflow
|
airflow-main/airflow/triggers/file.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import datetime
import os
import typing
from glob import glob
from typing import Any
from airflow.triggers.base import BaseTrigger, TriggerEvent
class FileTrigger(BaseTrigger):
"""
A trigger that fires exactly once after it finds the requested file or folder.
:param filepath: File or folder name (relative to the base path set within the connection), can
be a glob.
:param recursive: when set to ``True``, enables recursive directory matching behavior of
``**`` in glob filepath parameter. Defaults to ``False``.
"""
def __init__(
self,
filepath: str,
recursive: bool = False,
poll_interval: float = 5.0,
):
super().__init__()
self.filepath = filepath
self.recursive = recursive
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes FileTrigger arguments and classpath."""
return (
"airflow.triggers.file.FileTrigger",
{
"filepath": self.filepath,
"recursive": self.recursive,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> typing.AsyncIterator[TriggerEvent]:
"""Loop until the relevant files are found."""
while True:
for path in glob(self.filepath, recursive=self.recursive):
if os.path.isfile(path):
mod_time_f = os.path.getmtime(path)
mod_time = datetime.datetime.fromtimestamp(mod_time_f).strftime("%Y%m%d%H%M%S")
self.log.info("Found File %s last modified: %s", str(path), str(mod_time))
yield TriggerEvent(True)
for _, _, files in os.walk(self.filepath):
if len(files) > 0:
yield TriggerEvent(True)
await asyncio.sleep(self.poll_interval)
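# A hedged usage sketch (illustrative, not part of this module): a deferrable operator
# can hand control to FileTrigger via BaseOperator.defer(). The operator name and its
# fields are assumptions for the example.
from airflow.models.baseoperator import BaseOperator
class WaitForFileOperator(BaseOperator):
    def __init__(self, *, filepath: str, **kwargs):
        super().__init__(**kwargs)
        self.filepath = filepath
    def execute(self, context):
        self.defer(
            trigger=FileTrigger(filepath=self.filepath, poll_interval=30),
            method_name="execute_complete",
        )
    def execute_complete(self, context, event=None):
        # Resumes on a worker after the trigger has fired.
        self.log.info("Found %s", self.filepath)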
| 2,774 | 36.5 | 99 |
py
|
airflow
|
airflow-main/airflow/triggers/testing.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.triggers.base import BaseTrigger, TriggerEvent
class SuccessTrigger(BaseTrigger):
"""
A trigger that always succeeds immediately.
Should only be used for testing.
"""
def serialize(self) -> tuple[str, dict[str, Any]]:
return ("airflow.triggers.testing.SuccessTrigger", {})
async def run(self):
yield TriggerEvent(True)
class FailureTrigger(BaseTrigger):
"""
A trigger that always errors immediately.
Should only be used for testing.
"""
def serialize(self) -> tuple[str, dict[str, Any]]:
return ("airflow.triggers.testing.FailureTrigger", {})
async def run(self):
# Python needs at least one "yield" keyword in the body to make
# this an async generator.
if False:
yield None
raise ValueError("Deliberate trigger failure")
| 1,707 | 30.62963 | 71 |
py
|
airflow
|
airflow-main/airflow/triggers/temporal.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import datetime
from typing import Any
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.utils import timezone
class DateTimeTrigger(BaseTrigger):
"""
Trigger based on a datetime.
A trigger that fires exactly once, at the given datetime, give or take
a few seconds.
The provided datetime MUST be in UTC.
"""
def __init__(self, moment: datetime.datetime):
super().__init__()
if not isinstance(moment, datetime.datetime):
raise TypeError(f"Expected datetime.datetime type for moment. Got {type(moment)}")
# Make sure it's in UTC
elif moment.tzinfo is None:
raise ValueError("You cannot pass naive datetimes")
else:
self.moment = timezone.convert_to_utc(moment)
def serialize(self) -> tuple[str, dict[str, Any]]:
return ("airflow.triggers.temporal.DateTimeTrigger", {"moment": self.moment})
async def run(self):
"""
Simple time delay loop until the relevant time is met.
We do have a two-phase delay to save some cycles, but sleeping is so
cheap anyway that it's pretty loose. We also don't just sleep for
"the number of seconds until the time" in case the system clock changes
unexpectedly, or handles a DST change poorly.
"""
# Sleep in successively smaller increments starting from 1 hour down to 10 seconds at a time
self.log.info("trigger starting")
for step in 3600, 60, 10:
seconds_remaining = (self.moment - timezone.utcnow()).total_seconds()
while seconds_remaining > 2 * step:
self.log.info(f"{int(seconds_remaining)} seconds remaining; sleeping {step} seconds")
await asyncio.sleep(step)
seconds_remaining = (self.moment - timezone.utcnow()).total_seconds()
# Sleep a second at a time otherwise
while self.moment > timezone.utcnow():
self.log.info("sleeping 1 second...")
await asyncio.sleep(1)
# Send our single event and then we're done
self.log.info("yielding event with payload %r", self.moment)
yield TriggerEvent(self.moment)
class TimeDeltaTrigger(DateTimeTrigger):
"""
Create DateTimeTriggers based on delays.
Subclass to create DateTimeTriggers based on time delays rather
than exact moments.
While this is its own distinct class here, it will serialise to a
DateTimeTrigger class, since they're operationally the same.
"""
def __init__(self, delta: datetime.timedelta):
super().__init__(moment=timezone.utcnow() + delta)
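# A small sketch (illustrative) of the serialization round-trip noted above: a
# TimeDeltaTrigger serializes as a DateTimeTrigger with an absolute UTC moment, so the
# triggerer re-instantiates it as a DateTimeTrigger.
def _roundtrip_example() -> DateTimeTrigger:
    trigger = TimeDeltaTrigger(datetime.timedelta(minutes=10))
    classpath, kwargs = trigger.serialize()
    assert classpath == "airflow.triggers.temporal.DateTimeTrigger"
    return DateTimeTrigger(**kwargs)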
| 3,488 | 38.202247 | 101 |
py
|
airflow
|
airflow-main/airflow/triggers/external_task.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import datetime
import typing
from asgiref.sync import sync_to_async
from sqlalchemy import func
from sqlalchemy.orm import Session
from airflow.models import DagRun, TaskInstance
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState
class TaskStateTrigger(BaseTrigger):
"""
Waits asynchronously for a task in a different DAG to complete for a specific logical date.
:param dag_id: The dag_id that contains the task you want to wait for
:param task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:param states: allowed states, default is ``['success']``
    :param execution_dates: the logical dates of the task instances to wait for
:param poll_interval: The time interval in seconds to check the state.
The default value is 5 sec.
"""
def __init__(
self,
dag_id: str,
task_id: str,
states: list[str],
execution_dates: list[datetime.datetime],
poll_interval: float = 5.0,
):
super().__init__()
self.dag_id = dag_id
self.task_id = task_id
self.states = states
self.execution_dates = execution_dates
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, typing.Any]]:
"""Serializes TaskStateTrigger arguments and classpath."""
return (
"airflow.triggers.external_task.TaskStateTrigger",
{
"dag_id": self.dag_id,
"task_id": self.task_id,
"states": self.states,
"execution_dates": self.execution_dates,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> typing.AsyncIterator[TriggerEvent]:
"""Checks periodically in the database to see if the task exists and has hit one of the states."""
while True:
# mypy confuses typing here
num_tasks = await self.count_tasks() # type: ignore[call-arg]
if num_tasks == len(self.execution_dates):
yield TriggerEvent(True)
await asyncio.sleep(self.poll_interval)
@sync_to_async
@provide_session
def count_tasks(self, *, session: Session = NEW_SESSION) -> int | None:
"""Count how many task instances in the database match our criteria."""
count = (
session.query(func.count("*")) # .count() is inefficient
.filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.state.in_(self.states),
TaskInstance.execution_date.in_(self.execution_dates),
)
.scalar()
)
return typing.cast(int, count)
class DagStateTrigger(BaseTrigger):
"""
Waits asynchronously for a DAG to complete for a specific logical date.
:param dag_id: The dag_id that contains the task you want to wait for
:param states: allowed states, default is ``['success']``
    :param execution_dates: The logical dates of the DAG runs to wait for.
:param poll_interval: The time interval in seconds to check the state.
The default value is 5.0 sec.
"""
def __init__(
self,
dag_id: str,
states: list[DagRunState],
execution_dates: list[datetime.datetime],
poll_interval: float = 5.0,
):
super().__init__()
self.dag_id = dag_id
self.states = states
self.execution_dates = execution_dates
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, typing.Any]]:
"""Serializes DagStateTrigger arguments and classpath."""
return (
"airflow.triggers.external_task.DagStateTrigger",
{
"dag_id": self.dag_id,
"states": self.states,
"execution_dates": self.execution_dates,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> typing.AsyncIterator[TriggerEvent]:
"""Checks periodically in the database to see if the dag run exists and has hit one of the states."""
while True:
# mypy confuses typing here
num_dags = await self.count_dags() # type: ignore[call-arg]
if num_dags == len(self.execution_dates):
yield TriggerEvent(self.serialize())
await asyncio.sleep(self.poll_interval)
@sync_to_async
@provide_session
def count_dags(self, *, session: Session = NEW_SESSION) -> int | None:
"""Count how many dag runs in the database match our criteria."""
count = (
session.query(func.count("*")) # .count() is inefficient
.filter(
DagRun.dag_id == self.dag_id,
DagRun.state.in_(self.states),
DagRun.execution_date.in_(self.execution_dates),
)
.scalar()
)
return typing.cast(int, count)
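# A hedged usage sketch (illustrative): deferring on DagStateTrigger to wait for another
# DAG's run for the current logical date. The operator and its fields are assumptions;
# note that run() yields TriggerEvent(self.serialize()) as the event payload.
from airflow.models.baseoperator import BaseOperator
class WaitForOtherDag(BaseOperator):
    def __init__(self, *, external_dag_id: str, **kwargs):
        super().__init__(**kwargs)
        self.external_dag_id = external_dag_id
    def execute(self, context):
        self.defer(
            trigger=DagStateTrigger(
                dag_id=self.external_dag_id,
                states=[DagRunState.SUCCESS],
                execution_dates=[context["logical_date"]],
            ),
            method_name="execute_complete",
        )
    def execute_complete(self, context, event=None):
        self.log.info("External DAG %s reached an allowed state", self.external_dag_id)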
| 5,952 | 36.440252 | 109 |
py
|
airflow
|
airflow-main/airflow/triggers/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/hooks/base.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base class for all hooks."""
from __future__ import annotations
import logging
import warnings
from typing import TYPE_CHECKING, Any
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.typing_compat import Protocol
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.models.connection import Connection # Avoid circular imports.
log = logging.getLogger(__name__)
class BaseHook(LoggingMixin):
"""
Abstract base class for hooks.
Hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
@classmethod
def get_connections(cls, conn_id: str) -> list[Connection]:
"""
Get all connections as an iterable, given the connection id.
:param conn_id: connection id
:return: array of connections
"""
warnings.warn(
"`BaseHook.get_connections` method will be deprecated in the future."
"Please use `BaseHook.get_connection` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return [cls.get_connection(conn_id)]
@classmethod
def get_connection(cls, conn_id: str) -> Connection:
"""
Get connection, given connection id.
:param conn_id: connection id
:return: connection
"""
from airflow.models.connection import Connection
conn = Connection.get_connection_from_secrets(conn_id)
log.info("Using connection ID '%s' for task execution.", conn.conn_id)
return conn
@classmethod
def get_hook(cls, conn_id: str) -> BaseHook:
"""
Returns default hook for this connection id.
:param conn_id: connection id
:return: default hook for this connection
"""
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self) -> Any:
"""Returns connection for the hook."""
raise NotImplementedError()
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
return {}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {}
class DiscoverableHook(Protocol):
"""
Interface that providers *can* implement to be discovered by ProvidersManager.
It is not used by any of the Hooks, but simply methods and class fields described here are
implemented by those Hooks. Each method is optional -- only implement the ones you need.
The conn_name_attr, default_conn_name, conn_type should be implemented by those
Hooks that want to be automatically mapped from the connection_type -> Hook when get_hook method
is called with connection_type.
    Additionally, hook_name should be set when you want the hook to have a custom name in the UI selection
    list. If not specified, conn_name will be used.
The "get_ui_field_behaviour" and "get_connection_form_widgets" are optional - override them if you want
to customize the Connection Form screen. You can add extra widgets to parse your extra fields via the
get_connection_form_widgets method as well as hide or relabel the fields or pre-fill
them with placeholders via get_ui_field_behaviour method.
Note that the "get_ui_field_behaviour" and "get_connection_form_widgets" need to be set by each class
in the class hierarchy in order to apply widget customizations.
For example, even if you want to use the fields from your parent class, you must explicitly
have a method on *your* class:
.. code-block:: python
@classmethod
def get_ui_field_behaviour(cls):
return super().get_ui_field_behaviour()
    You also need to add the Hook class name to the 'hook_class_names' list in provider.yaml if you
    build an internal provider, or return it in the dictionary returned by the provider_info entrypoint
    in the package you prepare.
You can see some examples in airflow/providers/jdbc/hooks/jdbc.py.
"""
conn_name_attr: str
default_conn_name: str
conn_type: str
hook_name: str
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""
Returns dictionary of widgets to be added for the hook to handle extra values.
If you have class hierarchy, usually the widgets needed by your class are already
added by the base class, so there is no need to implement this method. It might
actually result in warning in the logs if you try to add widgets that have already
been added by the base class.
Note that values of Dict should be of wtforms.Field type. It's not added here
for the efficiency of imports.
"""
...
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""
Attributes of the UI field.
Returns dictionary describing customizations to implement in javascript handling the
connection form. Should be compliant with airflow/customized_form_field_behaviours.schema.json'
If you change conn_type in a derived class, you should also
implement this method and return field customizations appropriate to your Hook. This
        is because the child hook will usually have a different conn_type and the customizations
are per connection type.
.. seealso::
:class:`~airflow.providers.google.cloud.hooks.compute_ssh.ComputeSSH` as an example
"""
...
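# A minimal sketch of a concrete hook built on BaseHook (all names below are
# illustrative assumptions). It resolves its Connection via get_connection() and
# overrides the optional UI-customization hooks described by DiscoverableHook.
class MyServiceHook(BaseHook):
    conn_name_attr = "my_service_conn_id"
    default_conn_name = "my_service_default"
    conn_type = "my_service"
    hook_name = "My Service"
    def __init__(self, my_service_conn_id: str = default_conn_name) -> None:
        super().__init__()
        self.conn_id = my_service_conn_id
    def get_conn(self) -> Any:
        conn = self.get_connection(self.conn_id)
        # Build whatever client object the external system needs from conn here.
        return {"host": conn.host, "login": conn.login}
    @classmethod
    def get_ui_field_behaviour(cls) -> dict[str, Any]:
        return {"hidden_fields": ["port"], "relabeling": {"host": "Service URL"}}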
| 6,459 | 35.089385 | 107 |
py
|
airflow
|
airflow-main/airflow/hooks/subprocess.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import os
import signal
from collections import namedtuple
from subprocess import PIPE, STDOUT, Popen
from tempfile import TemporaryDirectory, gettempdir
from airflow.hooks.base import BaseHook
SubprocessResult = namedtuple("SubprocessResult", ["exit_code", "output"])
class SubprocessHook(BaseHook):
"""Hook for running processes with the ``subprocess`` module."""
def __init__(self) -> None:
self.sub_process: Popen[bytes] | None = None
super().__init__()
def run_command(
self,
command: list[str],
env: dict[str, str] | None = None,
output_encoding: str = "utf-8",
cwd: str | None = None,
) -> SubprocessResult:
"""
Execute the command.
If ``cwd`` is None, execute the command in a temporary directory which will be cleaned afterwards.
If ``env`` is not supplied, ``os.environ`` is passed
:param command: the command to run
:param env: Optional dict containing environment variables to be made available to the shell
environment in which ``command`` will be executed. If omitted, ``os.environ`` will be used.
Note, that in case you have Sentry configured, original variables from the environment
will also be passed to the subprocess with ``SUBPROCESS_`` prefix. See
:doc:`/administration-and-deployment/logging-monitoring/errors` for details.
:param output_encoding: encoding to use for decoding stdout
:param cwd: Working directory to run the command in.
If None (default), the command is run in a temporary directory.
:return: :class:`namedtuple` containing ``exit_code`` and ``output``, the last line from stderr
or stdout
"""
self.log.info("Tmp dir root location: %s", gettempdir())
with contextlib.ExitStack() as stack:
if cwd is None:
cwd = stack.enter_context(TemporaryDirectory(prefix="airflowtmp"))
def pre_exec():
# Restore default signal disposition and invoke setsid
for sig in ("SIGPIPE", "SIGXFZ", "SIGXFSZ"):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
self.log.info("Running command: %s", command)
self.sub_process = Popen(
command,
stdout=PIPE,
stderr=STDOUT,
cwd=cwd,
env=env if env or env == {} else os.environ,
preexec_fn=pre_exec,
)
self.log.info("Output:")
line = ""
if self.sub_process is None:
raise RuntimeError("The subprocess should be created here and is None!")
if self.sub_process.stdout is not None:
for raw_line in iter(self.sub_process.stdout.readline, b""):
line = raw_line.decode(output_encoding, errors="backslashreplace").rstrip()
self.log.info("%s", line)
self.sub_process.wait()
self.log.info("Command exited with return code %s", self.sub_process.returncode)
return_code: int = self.sub_process.returncode
return SubprocessResult(exit_code=return_code, output=line)
def send_sigterm(self):
"""Sends SIGTERM signal to ``self.sub_process`` if one exists."""
self.log.info("Sending SIGTERM signal to process group")
if self.sub_process and hasattr(self.sub_process, "pid"):
os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
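# A hedged usage sketch: run a shell command through SubprocessHook and inspect the
# SubprocessResult namedtuple (exit_code, output). The command and env are illustrative.
def _run_example() -> str:
    hook = SubprocessHook()
    result = hook.run_command(command=["bash", "-c", "echo hello"], env={"MY_VAR": "1"})
    if result.exit_code != 0:
        raise RuntimeError(f"Command failed, last output line: {result.output}")
    return result.output  # the last line written to stdout, here "hello"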
| 4,484 | 40.915888 | 106 |
py
|
airflow
|
airflow-main/airflow/hooks/filesystem.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.hooks.base import BaseHook
class FSHook(BaseHook):
"""
    Allows for interaction with a file server.
Connection should have a name and a path specified under extra:
example:
Connection Id: fs_test
Connection Type: File (path)
Host, Schema, Login, Password, Port: empty
Extra: {"path": "/tmp"}
"""
def __init__(self, conn_id: str = "fs_default"):
super().__init__()
conn = self.get_connection(conn_id)
self.basepath = conn.extra_dejson.get("path", "")
self.conn = conn
def get_conn(self) -> None:
pass
def get_path(self) -> str:
"""
Get the path to the filesystem location.
:return: the path.
"""
return self.basepath
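# A small usage sketch (illustrative): file sensors typically join the hook's base path
# with a relative path before checking for the file. The helper name is an assumption.
import os
def _resolve(relative_path: str, conn_id: str = "fs_default") -> str:
    hook = FSHook(conn_id)  # requires an "fs_default"-style connection to exist
    return os.path.join(hook.get_path(), relative_path)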
| 1,587 | 29.538462 | 67 |
py
|
airflow
|
airflow-main/airflow/hooks/dbapi.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.providers.common.sql.hooks.sql import (
ConnectorProtocol, # noqa
DbApiHook, # noqa
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.common.sql.hooks.sql`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,158 | 34.121212 | 86 |
py
|
airflow
|
airflow-main/airflow/hooks/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# fmt: off
"""Hooks."""
from __future__ import annotations
from airflow.utils.deprecation_tools import add_deprecated_classes
__deprecated_classes = {
"S3_hook": {
"S3Hook": "airflow.providers.amazon.aws.hooks.s3.S3Hook",
"provide_bucket_name": "airflow.providers.amazon.aws.hooks.s3.provide_bucket_name",
},
"base_hook": {
"BaseHook": "airflow.hooks.base.BaseHook",
},
"dbapi_hook": {
"DbApiHook": "airflow.providers.common.sql.hooks.sql.DbApiHook",
},
"docker_hook": {
"DockerHook": "airflow.providers.docker.hooks.docker.DockerHook",
},
"druid_hook": {
"DruidDbApiHook": "airflow.providers.apache.druid.hooks.druid.DruidDbApiHook",
"DruidHook": "airflow.providers.apache.druid.hooks.druid.DruidHook",
},
"hive_hooks": {
"HIVE_QUEUE_PRIORITIES": "airflow.providers.apache.hive.hooks.hive.HIVE_QUEUE_PRIORITIES",
"HiveCliHook": "airflow.providers.apache.hive.hooks.hive.HiveCliHook",
"HiveMetastoreHook": "airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook",
"HiveServer2Hook": "airflow.providers.apache.hive.hooks.hive.HiveServer2Hook",
},
"http_hook": {
"HttpHook": "airflow.providers.http.hooks.http.HttpHook",
},
"jdbc_hook": {
"JdbcHook": "airflow.providers.jdbc.hooks.jdbc.JdbcHook",
"jaydebeapi": "airflow.providers.jdbc.hooks.jdbc.jaydebeapi",
},
"mssql_hook": {
"MsSqlHook": "airflow.providers.microsoft.mssql.hooks.mssql.MsSqlHook",
},
"mysql_hook": {
"MySqlHook": "airflow.providers.mysql.hooks.mysql.MySqlHook",
},
"oracle_hook": {
"OracleHook": "airflow.providers.oracle.hooks.oracle.OracleHook",
},
"pig_hook": {
"PigCliHook": "airflow.providers.apache.pig.hooks.pig.PigCliHook",
},
"postgres_hook": {
"PostgresHook": "airflow.providers.postgres.hooks.postgres.PostgresHook",
},
"presto_hook": {
"PrestoHook": "airflow.providers.presto.hooks.presto.PrestoHook",
},
"samba_hook": {
"SambaHook": "airflow.providers.samba.hooks.samba.SambaHook",
},
"slack_hook": {
"SlackHook": "airflow.providers.slack.hooks.slack.SlackHook",
},
"sqlite_hook": {
"SqliteHook": "airflow.providers.sqlite.hooks.sqlite.SqliteHook",
},
"webhdfs_hook": {
"WebHDFSHook": "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook",
},
"zendesk_hook": {
"ZendeskHook": "airflow.providers.zendesk.hooks.zendesk.ZendeskHook",
},
}
add_deprecated_classes(__deprecated_classes, __name__)
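# A hedged sketch of the behaviour this mapping provides: importing a class from the
# legacy path still works (assuming the matching provider distribution is installed)
# but emits a deprecation warning pointing at the new location.
def _legacy_import_example():
    import warnings
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from airflow.hooks.S3_hook import S3Hook  # resolves to the Amazon provider
    return S3Hook, [str(w.message) for w in caught]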
| 3,422 | 36.615385 | 98 |
py
|
airflow
|
airflow-main/airflow/jobs/backfill_job_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import time
from typing import TYPE_CHECKING, Any, Iterable, Iterator, Mapping, Sequence
import attr
import pendulum
from sqlalchemy import select, update
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import Session, make_transient
from tabulate import tabulate
from airflow import models
from airflow.exceptions import (
AirflowException,
BackfillUnfinished,
DagConcurrencyLimitReached,
NoAvailablePoolSlot,
PoolNotFound,
TaskConcurrencyLimitReached,
)
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import Job, perform_heartbeat
from airflow.models import DAG, DagPickle
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import BACKFILL_QUEUED_DEPS
from airflow.timetables.base import DagRunInfo
from airflow.utils import helpers, timezone
from airflow.utils.configuration import conf as airflow_conf, tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.types import DagRunType
if TYPE_CHECKING:
from airflow.executors.base_executor import BaseExecutor
from airflow.models.abstractoperator import AbstractOperator
class BackfillJobRunner(BaseJobRunner[Job], LoggingMixin):
"""
A backfill job runner consists of a dag or subdag for a specific time range.
It triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instance to be completed.
"""
job_type = "BackfillJob"
STATES_COUNT_AS_RUNNING = (TaskInstanceState.RUNNING, TaskInstanceState.QUEUED)
@attr.define
class _DagRunTaskStatus:
"""
Internal status of the backfill job.
This class is intended to be instantiated only within a BackfillJobRunner
instance and will track the execution of tasks, e.g. running, skipped,
succeeded, failed, etc. Information about the dag runs related to the
backfill job are also being tracked in this structure, e.g. finished runs, etc.
        Any other status information related to the execution of dag runs / tasks
can be included in this structure since it makes it easier to pass it around.
:param to_run: Tasks to run in the backfill
:param running: Maps running task instance key to task instance object
:param skipped: Tasks that have been skipped
:param succeeded: Tasks that have succeeded so far
:param failed: Tasks that have failed
:param not_ready: Tasks not ready for execution
:param deadlocked: Deadlocked tasks
:param active_runs: Active dag runs at a certain point in time
:param executed_dag_run_dates: Datetime objects for the executed dag runs
:param finished_runs: Number of finished runs so far
:param total_runs: Number of total dag runs able to run
"""
to_run: dict[TaskInstanceKey, TaskInstance] = attr.ib(factory=dict)
running: dict[TaskInstanceKey, TaskInstance] = attr.ib(factory=dict)
skipped: set[TaskInstanceKey] = attr.ib(factory=set)
succeeded: set[TaskInstanceKey] = attr.ib(factory=set)
failed: set[TaskInstanceKey] = attr.ib(factory=set)
not_ready: set[TaskInstanceKey] = attr.ib(factory=set)
deadlocked: set[TaskInstance] = attr.ib(factory=set)
active_runs: list[DagRun] = attr.ib(factory=list)
executed_dag_run_dates: set[pendulum.DateTime] = attr.ib(factory=set)
finished_runs: int = 0
total_runs: int = 0
def __init__(
self,
job: Job,
dag: DAG,
start_date=None,
end_date=None,
mark_success=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
run_backwards=False,
run_at_least_once=False,
continue_on_failures=False,
disable_retry=False,
) -> None:
"""
Create a BackfillJobRunner.
:param dag: DAG object.
:param start_date: start date for the backfill date range.
:param end_date: end date for the backfill date range.
        :param mark_success: flag whether to auto-mark tasks as successful.
        :param donot_pickle: whether to avoid pickling the DAG for the executor
        :param ignore_first_depends_on_past: whether to ignore depends_on_past for the first dag run
        :param ignore_task_deps: whether to ignore task dependencies
        :param pool: pool to backfill
        :param delay_on_limit_secs: time in seconds to wait before the next attempt to run
            a dag run when the max_active_runs limit has been reached
        :param verbose: whether to log verbose details when checking task dependencies
        :param conf: a dictionary of key-value pairs the user can pass to the backfill dag runs
        :param rerun_failed_tasks: flag whether to auto rerun failed tasks in the backfill
        :param run_backwards: Whether to process the dates from most to least recent
        :param run_at_least_once: If true, always run the DAG at least once even
            if no logical run exists within the time range.
"""
super().__init__(job)
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
self.delay_on_limit_secs = delay_on_limit_secs
self.verbose = verbose
self.conf = conf
self.rerun_failed_tasks = rerun_failed_tasks
self.run_backwards = run_backwards
self.run_at_least_once = run_at_least_once
self.continue_on_failures = continue_on_failures
self.disable_retry = disable_retry
def _update_counters(self, ti_status: _DagRunTaskStatus, session: Session) -> None:
"""
Updates the counters per state of the tasks that were running.
Can re-add to tasks to run when required.
:param ti_status: the internal status of the backfill job tasks
"""
tis_to_be_scheduled = []
refreshed_tis = []
TI = TaskInstance
ti_primary_key_to_ti_key = {ti_key.primary: ti_key for ti_key in ti_status.running.keys()}
filter_for_tis = TI.filter_for_tis(list(ti_status.running.values()))
if filter_for_tis is not None:
refreshed_tis = session.scalars(select(TI).where(filter_for_tis)).all()
for ti in refreshed_tis:
# Use primary key to match in memory information
ti_key = ti_primary_key_to_ti_key[ti.key.primary]
if ti.state == TaskInstanceState.SUCCESS:
ti_status.succeeded.add(ti_key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(ti_key)
continue
if ti.state == TaskInstanceState.SKIPPED:
ti_status.skipped.add(ti_key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(ti_key)
continue
if ti.state == TaskInstanceState.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(ti_key)
ti_status.running.pop(ti_key)
continue
# special case: if the task needs to run again put it back
if ti.state == TaskInstanceState.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(ti_key)
ti_status.to_run[ti.key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == TaskInstanceState.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(ti_key)
ti_status.to_run[ti.key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside the scope of
# the backfill suddenly.
elif ti.state is None:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti,
)
tis_to_be_scheduled.append(ti)
ti_status.running.pop(ti_key)
ti_status.to_run[ti.key] = ti
# special case: Deferrable task can go from DEFERRED to SCHEDULED;
# when that happens, we need to put it back as in UP_FOR_RESCHEDULE
elif ti.state == TaskInstanceState.SCHEDULED:
self.log.debug("Task instance %s is resumed from deferred state", ti)
ti_status.running.pop(ti_key)
ti_status.to_run[ti.key] = ti
# Batch schedule of task instances
if tis_to_be_scheduled:
filter_for_tis = TI.filter_for_tis(tis_to_be_scheduled)
session.execute(
update(TI)
.where(filter_for_tis)
.values(state=TaskInstanceState.SCHEDULED)
.execution_options(synchronize_session=False)
)
session.flush()
def _manage_executor_state(
self,
running: Mapping[TaskInstanceKey, TaskInstance],
session: Session,
) -> Iterator[tuple[AbstractOperator, str, Sequence[TaskInstance], int]]:
"""
Compare task instances' states with that of the executor.
Expands downstream mapped tasks when necessary.
:param running: dict of key, task to verify
:return: An iterable of expanded TaskInstance per MappedTask
"""
executor = self.job.executor
# TODO: query all instead of refresh from db
for key, value in list(executor.get_event_buffer().items()):
state, info = value
if key not in running:
self.log.warning("%s state %s not in running=%s", key, state, running.values())
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if (
state in (TaskInstanceState.FAILED, TaskInstanceState.SUCCESS)
and ti.state in self.STATES_COUNT_AS_RUNNING
):
msg = (
f"Executor reports task instance {ti} finished ({state}) although the task says its "
f"{ti.state}. Was the task killed externally? Info: {info}"
)
self.log.error(msg)
ti.handle_failure(error=msg)
continue
def _iter_task_needing_expansion() -> Iterator[AbstractOperator]:
from airflow.models.mappedoperator import AbstractOperator
for node in self.dag.get_task(ti.task_id, include_subdags=True).iter_mapped_dependants():
if isinstance(node, AbstractOperator):
yield node
else: # A (mapped) task group. All its children need expansion.
yield from node.iter_tasks()
if ti.state not in self.STATES_COUNT_AS_RUNNING:
# Don't use ti.task; if this task is mapped, that attribute
                # would hold the unmapped task. We need the original task here.
for node in _iter_task_needing_expansion():
new_tis, num_mapped_tis = node.expand_mapped_task(ti.run_id, session=session)
yield node, ti.run_id, new_tis, num_mapped_tis
@provide_session
def _get_dag_run(
self,
dagrun_info: DagRunInfo,
dag: DAG,
session: Session = NEW_SESSION,
) -> DagRun | None:
"""
Return an existing dag run for the given run date or create one.
If the max_active_runs limit is reached, this function will return None.
:param dagrun_info: Schedule information for the dag run
:param dag: DAG
:param session: the database session object
:return: a DagRun in state RUNNING or None
"""
run_date = dagrun_info.logical_date
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = bool(dag.timetable.can_be_scheduled and not dag.is_subdag)
current_active_dag_count = dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of an already existing DAG run
# we could find a "scheduled" run instead of a "backfill"
runs = DagRun.find(dag_id=dag.dag_id, execution_date=run_date, session=session)
run: DagRun | None
if runs:
run = runs[0]
if run.state == DagRunState.RUNNING:
respect_dag_max_active_limit = False
# Fixes --conf overwrite for backfills with already existing DagRuns
run.conf = self.conf or {}
# start_date is cleared for existing DagRuns
run.start_date = timezone.utcnow()
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if respect_dag_max_active_limit and current_active_dag_count >= dag.max_active_runs:
return None
run = run or dag.create_dagrun(
execution_date=run_date,
data_interval=dagrun_info.data_interval,
start_date=timezone.utcnow(),
state=DagRunState.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
run_type=DagRunType.BACKFILL_JOB,
creating_job_id=self.job.id,
)
# set required transient field
run.dag = dag
# explicitly mark as backfill and running
run.state = DagRunState.RUNNING
run.run_type = DagRunType.BACKFILL_JOB
run.verify_integrity(session=session)
run.notify_dagrun_state_changed(msg="started")
return run
@provide_session
def _task_instances_for_dag_run(
self,
dag: DAG,
dag_run: DagRun,
session: Session = NEW_SESSION,
) -> dict[TaskInstanceKey, TaskInstance]:
"""
Return a map of task instance keys to task instance objects for the given dag run.
:param dag_run: the dag run to get the tasks from
:param session: the database session object
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db(session=session)
make_transient(dag_run)
dag_run.dag = dag
info = dag_run.task_instance_scheduling_decisions(session=session)
schedulable_tis = info.schedulable_tis
try:
for ti in dag_run.get_task_instances(session=session):
if ti in schedulable_tis:
ti.set_state(TaskInstanceState.SCHEDULED)
if ti.state != TaskInstanceState.REMOVED:
tasks_to_run[ti.key] = ti
session.commit()
except Exception:
session.rollback()
raise
return tasks_to_run
def _log_progress(self, ti_status: _DagRunTaskStatus) -> None:
self.log.info(
"[backfill progress] | finished run %s of %s | tasks waiting: %s | succeeded: %s | "
"running: %s | failed: %s | skipped: %s | deadlocked: %s | not ready: %s",
ti_status.finished_runs,
ti_status.total_runs,
len(ti_status.to_run),
len(ti_status.succeeded),
len(ti_status.running),
len(ti_status.failed),
len(ti_status.skipped),
len(ti_status.deadlocked),
len(ti_status.not_ready),
)
self.log.debug("Finished dag run loop iteration. Remaining tasks %s", ti_status.to_run.values())
def _process_backfill_task_instances(
self,
ti_status: _DagRunTaskStatus,
executor: BaseExecutor,
pickle_id: int | None,
start_date: datetime.datetime | None = None,
*,
session: Session,
) -> list:
"""
Process a set of task instances from a set of DAG runs.
Special handling is done to account for different task instance states
that could be present when running them in a backfill process.
:param ti_status: the internal status of the job
:param executor: the executor to run the task instances
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:param start_date: the start date of the backfill job
:param session: the current session object
:return: the list of execution_dates for the finished dag runs
"""
executed_run_dates = []
is_unit_test = airflow_conf.getboolean("core", "unit_test_mode")
while (len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and len(ti_status.deadlocked) == 0:
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
def _per_task_process(key, ti: TaskInstance, session):
ti.refresh_from_db(lock_for_update=True, session=session)
task = self.dag.get_task(ti.task_id, include_subdags=True)
ti.task = task
self.log.debug("Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == TaskInstanceState.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
elif ti.state == TaskInstanceState.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED):
self.log.error("Task instance %s with state %s", ti, ti.state)
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(TaskInstanceState.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED):
self.log.error("Task instance %s with state %s", ti, ti.state)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
if self.ignore_first_depends_on_past:
dagrun = ti.get_dagrun(session=session)
ignore_depends_on_past = dagrun.execution_date == (start_date or ti.start_date)
else:
ignore_depends_on_past = False
backfill_context = DepContext(
deps=BACKFILL_QUEUED_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
wait_for_past_depends_before_skipping=False,
flag_upstream_failed=True,
)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context, session=session, verbose=self.verbose
):
if executor.has_task(ti):
self.log.debug("Task Instance %s already in executor waiting for queue to clear", ti)
else:
self.log.debug("Sending %s to executor", ti)
# Skip scheduled state, we are executing immediately
ti.state = TaskInstanceState.QUEUED
ti.queued_by_job_id = self.job.id
ti.queued_dttm = timezone.utcnow()
session.merge(ti)
try:
session.commit()
except OperationalError:
self.log.exception("Failed to commit task state change due to operational error")
session.rollback()
# early exit so the outer loop can retry
return
cfg_path = None
if executor.is_local:
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=False,
pool=self.pool,
cfg_path=cfg_path,
)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
return
if ti.state == TaskInstanceState.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# special case
if ti.state == TaskInstanceState.UP_FOR_RETRY:
self.log.debug("Task instance %s retry period not expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# special case
if ti.state == TaskInstanceState.UP_FOR_RESCHEDULE:
self.log.debug("Task instance %s reschedule period not expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# all remaining tasks
self.log.debug("Adding %s to not_ready", ti)
ti_status.not_ready.add(key)
try:
for task in self.dag.topological_sort(include_subdag_tasks=True):
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
pool = session.scalar(
select(models.Pool).where(models.Pool.pool == task.pool).limit(1)
)
if not pool:
raise PoolNotFound(f"Unknown pool: {task.pool}")
open_slots = pool.open_slots(session=session)
if open_slots <= 0:
raise NoAvailablePoolSlot(
f"Not scheduling since there are {open_slots} open slots in pool {task.pool}"
)
num_running_task_instances_in_dag = DAG.get_num_task_instances(
self.dag_id,
states=self.STATES_COUNT_AS_RUNNING,
session=session,
)
if num_running_task_instances_in_dag >= self.dag.max_active_tasks:
raise DagConcurrencyLimitReached(
"Not scheduling since DAG max_active_tasks limit is reached."
)
if task.max_active_tis_per_dag is not None:
num_running_task_instances_in_task = DAG.get_num_task_instances(
dag_id=self.dag_id,
task_ids=[task.task_id],
states=self.STATES_COUNT_AS_RUNNING,
session=session,
)
if num_running_task_instances_in_task >= task.max_active_tis_per_dag:
raise TaskConcurrencyLimitReached(
"Not scheduling since Task concurrency limit is reached."
)
if task.max_active_tis_per_dagrun is not None:
num_running_task_instances_in_task_dagrun = DAG.get_num_task_instances(
dag_id=self.dag_id,
run_id=ti.run_id,
task_ids=[task.task_id],
states=self.STATES_COUNT_AS_RUNNING,
session=session,
)
if num_running_task_instances_in_task_dagrun >= task.max_active_tis_per_dagrun:
raise TaskConcurrencyLimitReached(
"Not scheduling since Task concurrency per DAG run limit is reached."
)
_per_task_process(key, ti, session)
session.commit()
except (NoAvailablePoolSlot, DagConcurrencyLimitReached, TaskConcurrencyLimitReached) as e:
self.log.debug(e)
perform_heartbeat(
job=self.job, heartbeat_callback=self.heartbeat_callback, only_if_necessary=is_unit_test
)
# execute the tasks in the queue
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (
ti_status.not_ready
and ti_status.not_ready == set(ti_status.to_run)
and len(ti_status.running) == 0
):
self.log.warning("Deadlock discovered for ti_status.to_run=%s", ti_status.to_run.values())
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state -- and expand any mapped TIs
for node, run_id, new_mapped_tis, max_map_index in self._manage_executor_state(
ti_status.running, session
):
def to_keep(key: TaskInstanceKey) -> bool:
if key.dag_id != node.dag_id or key.task_id != node.task_id or key.run_id != run_id:
# For another Dag/Task/Run -- don't remove
return True
return 0 <= key.map_index <= max_map_index
# remove the old unmapped TIs for node -- they have been replaced with the mapped TIs
ti_status.to_run = {key: ti for (key, ti) in ti_status.to_run.items() if to_keep(key)}
ti_status.to_run.update({ti.key: ti for ti in new_mapped_tis})
for new_ti in new_mapped_tis:
new_ti.set_state(TaskInstanceState.SCHEDULED, session=session)
# Set state to failed for running TIs that are set up for retry if disable-retry flag is set
for ti in ti_status.running.values():
if self.disable_retry and ti.state == TaskInstanceState.UP_FOR_RETRY:
ti.set_state(TaskInstanceState.FAILED, session=session)
# update the task counters
self._update_counters(ti_status=ti_status, session=session)
session.commit()
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished_dr_states:
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
session.commit()
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status: _DagRunTaskStatus, session: Session = NEW_SESSION) -> Iterator[str]:
def tabulate_ti_keys_set(ti_keys: Iterable[TaskInstanceKey]) -> str:
# Sorting by execution date first
sorted_ti_keys: Any = sorted(
ti_keys,
key=lambda ti_key: (
ti_key.run_id,
ti_key.dag_id,
ti_key.task_id,
ti_key.map_index,
ti_key.try_number,
),
)
if all(key.map_index == -1 for key in ti_keys):
headers = ["DAG ID", "Task ID", "Run ID", "Try number"]
sorted_ti_keys = map(lambda k: k[0:4], sorted_ti_keys)
else:
headers = ["DAG ID", "Task ID", "Run ID", "Map Index", "Try number"]
return tabulate(sorted_ti_keys, headers=headers)
if ti_status.failed:
yield "Some task instances failed:\n"
yield tabulate_ti_keys_set(ti_status.failed)
if ti_status.deadlocked:
yield "BackfillJob is deadlocked."
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose,
)
!= t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True), session=session, verbose=self.verbose
)
for t in ti_status.deadlocked
)
if deadlocked_depends_on_past:
yield (
"Some of the deadlocked tasks were unable to run because "
'of "depends_on_past" relationships. Try running the '
"backfill with the option "
'"ignore_first_depends_on_past=True" or passing "-I" at '
"the command line."
)
yield "\nThese tasks have succeeded:\n"
yield tabulate_ti_keys_set(ti_status.succeeded)
yield "\n\nThese tasks are running:\n"
yield tabulate_ti_keys_set(ti_status.running)
yield "\n\nThese tasks have failed:\n"
yield tabulate_ti_keys_set(ti_status.failed)
yield "\n\nThese tasks are skipped:\n"
yield tabulate_ti_keys_set(ti_status.skipped)
yield "\n\nThese tasks are deadlocked:\n"
yield tabulate_ti_keys_set([ti.key for ti in ti_status.deadlocked])
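# Note: the backfill operates on the main DAG plus all of its (nested) SubDAGs, so task
# instances inside SubDagOperator children are scheduled as part of the same backfill.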
def _get_dag_with_subdags(self) -> list[DAG]:
return [self.dag] + self.dag.subdags
@provide_session
def _execute_dagruns(
self,
dagrun_infos: Iterable[DagRunInfo],
ti_status: _DagRunTaskStatus,
executor: BaseExecutor,
pickle_id: int | None,
start_date: datetime.datetime | None,
session: Session = NEW_SESSION,
) -> None:
"""
Compute and execute dag runs and their respective task instances for the given dates.
Returns a list of execution dates of the dag runs that were executed.
:param dagrun_infos: Schedule information for dag runs
:param ti_status: internal BackfillJobRunner status structure to tis track progress
:param executor: the executor to use, it must be previously started
:param pickle_id: numeric id of the pickled dag, None if not pickled
:param start_date: backfill start date
:param session: the current session object
"""
for dagrun_info in dagrun_infos:
for dag in self._get_dag_with_subdags():
dag_run = self._get_dag_run(dagrun_info, dag, session=session)
if dag_run is None:
continue
tis_map = self._task_instances_for_dag_run(dag, dag_run, session=session)
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session,
)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _set_unfinished_dag_runs_to_failed(
self,
dag_runs: Iterable[DagRun],
session: Session = NEW_SESSION,
) -> None:
"""
Update the state of each dagrun based on the task_instance state and set unfinished runs to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished_dr_states:
dag_run.set_state(DagRunState.FAILED)
session.merge(dag_run)
@provide_session
def _execute(self, session: Session = NEW_SESSION) -> None:
"""
Initialize all required components of a dag for a specified date range and execute the tasks.
:meta private:
"""
ti_status = BackfillJobRunner._DagRunTaskStatus()
start_date = self.bf_start_date
# Get DagRun schedule between the start/end dates, which will turn into dag runs.
dagrun_start_date = timezone.coerce_datetime(start_date)
if self.bf_end_date is None:
dagrun_end_date = pendulum.now(timezone.utc)
else:
dagrun_end_date = pendulum.instance(self.bf_end_date)
dagrun_infos = list(self.dag.iter_dagrun_infos_between(dagrun_start_date, dagrun_end_date))
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
f"You cannot backfill backwards because one or more "
f'tasks depend_on_past: {",".join(tasks_that_depend_on_past)}'
)
dagrun_infos = dagrun_infos[::-1]
if not dagrun_infos:
if not self.run_at_least_once:
self.log.info("No run dates were found for the given dates and dag interval.")
return
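# run_at_least_once: fall back to a single synthetic interval covering the requested
# start/end so that at least one dag run is created even without schedule-aligned dates.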
dagrun_infos = [DagRunInfo.interval(dagrun_start_date, dagrun_end_date)]
dag_with_subdags_ids = [d.dag_id for d in self._get_dag_with_subdags()]
running_dagruns = DagRun.find(
dag_id=dag_with_subdags_ids,
execution_start_date=self.bf_start_date,
execution_end_date=self.bf_end_date,
no_backfills=True,
state=DagRunState.RUNNING,
)
if running_dagruns:
for run in running_dagruns:
self.log.error(
"Backfill cannot be created for DagRun %s in %s, as there's already %s in a RUNNING "
"state.",
run.run_id,
run.execution_date.strftime("%Y-%m-%dT%H:%M:%S"),
run.run_type,
)
self.log.error(
"Changing DagRun into BACKFILL would cause scheduler to lose track of executing "
"tasks. Not changing DagRun type into BACKFILL, and trying insert another DagRun into "
"database would cause database constraint violation for dag_id + execution_date "
"combination. Please adjust backfill dates or wait for this DagRun to finish.",
)
return
# picklin'
pickle_id = None
executor_class, _ = ExecutorLoader.import_default_executor_cls()
if not self.donot_pickle and executor_class.supports_pickling:
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.job.executor
executor.job_id = self.job.id
executor.start()
ti_status.total_runs = len(dagrun_infos) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
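# Loop until every dag run date in the backfill window has been executed; each pass
# only processes the dates not yet recorded in ti_status.executed_dag_run_dates.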
dagrun_infos_to_process = [
dagrun_info
for dagrun_info in dagrun_infos
if dagrun_info.logical_date not in ti_status.executed_dag_run_dates
]
self._execute_dagruns(
dagrun_infos=dagrun_infos_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session,
)
remaining_dates = ti_status.total_runs - len(ti_status.executed_dag_run_dates)
err = "".join(self._collect_errors(ti_status=ti_status, session=session))
if err:
if not self.continue_on_failures or ti_status.deadlocked:
raise BackfillUnfinished(err, ti_status)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id,
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done for DAG %s. Exiting.", self.dag)
@provide_session
def reset_state_for_orphaned_tasks(
self,
filter_by_dag_run: DagRun | None = None,
session: Session = NEW_SESSION,
) -> int | None:
"""
Reset state of orphaned tasks.
This function checks whether any task instances in the given dag run (or in all
dag runs) are in the SCHEDULED or QUEUED state but are not known to the executor.
Any such task instances have their state reset to None so they get picked up
again. The work is batched for performance reasons, as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:return: the number of TIs reset
"""
queued_tis = self.job.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.job.executor.running
# Can't use an update here since it doesn't support joins.
resettable_states = [TaskInstanceState.SCHEDULED, TaskInstanceState.QUEUED]
if filter_by_dag_run is None:
resettable_tis = (
session.scalars(
select(TaskInstance)
.join(TaskInstance.dag_run)
.where(
DagRun.state == DagRunState.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
TaskInstance.state.in_(resettable_states),
)
)
).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states, session=session)
tis_to_reset = [ti for ti in resettable_tis if ti.key not in queued_tis and ti.key not in running_tis]
if not tis_to_reset:
return 0
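# Helper for reduce_in_chunks below: locks each chunk of task instances with
# SELECT ... FOR UPDATE, resets their state to None, and accumulates the reset rows.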
def query(result, items):
if not items:
return result
filter_for_tis = TaskInstance.filter_for_tis(items)
reset_tis = session.scalars(
select(TaskInstance)
.where(filter_for_tis, TaskInstance.state.in_(resettable_states))
.with_for_update()
).all()
for ti in reset_tis:
ti.state = None
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query, tis_to_reset, [], self.job.max_tis_per_query)
task_instance_str = "\n\t".join(repr(x) for x in reset_tis)
session.flush()
self.log.info("Reset the following %s TaskInstances:\n\t%s", len(reset_tis), task_instance_str)
return len(reset_tis)
| 43,863 | 42.215764 | 110 |
py
|
airflow
|
airflow-main/airflow/jobs/scheduler_job_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import time
import warnings
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, timedelta
from functools import lru_cache, partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Collection, Iterable, Iterator
from sqlalchemy import and_, delete, func, not_, or_, select, text, update
from sqlalchemy.engine import Result
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Query, Session, load_only, make_transient, selectinload
from sqlalchemy.sql import expression
from airflow import settings
from airflow.callbacks.callback_requests import DagCallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.callbacks.pipe_callback_sink import PipeCallbackSink
from airflow.configuration import conf
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import Job, perform_heartbeat
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.dataset import (
DagScheduleDatasetReference,
DatasetDagRunQueue,
DatasetEvent,
DatasetModel,
TaskOutletDatasetReference,
)
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.timetables.simple import DatasetTriggeredTimetable
from airflow.utils import timezone
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import (
CommitProhibitorGuard,
is_lock_not_available_error,
prohibit_commit,
skip_locked,
tuple_in_condition,
with_row_locks,
)
from airflow.utils.state import DagRunState, JobState, State, TaskInstanceState
from airflow.utils.types import DagRunType
if TYPE_CHECKING:
from types import FrameType
from airflow.dag_processing.manager import DagFileProcessorAgent
TI = TaskInstance
DR = DagRun
DM = DagModel
@dataclass
class ConcurrencyMap:
"""
Dataclass to represent concurrency maps.
It contains a map from dag_id to # of task instances in the given state list, a map from
(dag_id, task_id) to # of task instances in the given state list, and a map from
(dag_id, run_id, task_id) to # of task instances in the given state list in each DAG run.
"""
dag_active_tasks_map: dict[str, int]
task_concurrency_map: dict[tuple[str, str], int]
task_dagrun_concurrency_map: dict[tuple[str, str, str], int]
@classmethod
def from_concurrency_map(cls, mapping: dict[tuple[str, str, str], int]) -> ConcurrencyMap:
instance = cls(Counter(), Counter(), Counter(mapping))
for (d, r, t), c in mapping.items():
instance.dag_active_tasks_map[d] += c
instance.task_concurrency_map[(d, t)] += c
return instance
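# Example with hypothetical counts: {("dag_a", "run_1", "t1"): 2, ("dag_a", "run_2", "t1"): 1}
# rolls up to dag_active_tasks_map["dag_a"] == 3 and task_concurrency_map[("dag_a", "t1")] == 3,
# while task_dagrun_concurrency_map keeps the per-run counts unchanged.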
def _is_parent_process() -> bool:
"""
Whether this is a parent process.
Return True if the current process is the parent process.
False if the current process is a child process started by multiprocessing.
"""
return multiprocessing.current_process().name == "MainProcess"
class SchedulerJobRunner(BaseJobRunner[Job], LoggingMixin):
"""
SchedulerJobRunner runs for a specific time interval and schedules jobs that are ready to run.
It figures out the latest runs for each task and sees if the dependencies
for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:param scheduler_idle_sleep_time: The number of seconds to wait between
polls of running processors
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:param log: override the default Logger
"""
job_type = "SchedulerJob"
heartrate: int = conf.getint("scheduler", "SCHEDULER_HEARTBEAT_SEC")
def __init__(
self,
job: Job,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint("scheduler", "num_runs"),
num_times_parse_dags: int = -1,
scheduler_idle_sleep_time: float = conf.getfloat("scheduler", "scheduler_idle_sleep_time"),
do_pickle: bool = False,
log: logging.Logger | None = None,
processor_poll_interval: float | None = None,
):
super().__init__(job)
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
if processor_poll_interval:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The 'processor_poll_interval' parameter is deprecated. "
"Please use 'scheduler_idle_sleep_time'.",
RemovedInAirflow3Warning,
stacklevel=2,
)
scheduler_idle_sleep_time = processor_poll_interval
self._scheduler_idle_sleep_time = scheduler_idle_sleep_time
# How many seconds do we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = conf.getint("scheduler", "scheduler_zombie_task_threshold")
self._standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
self._dag_stale_not_seen_duration = conf.getint("scheduler", "dag_stale_not_seen_duration")
# Since the functionality for stalled_task_timeout, task_adoption_timeout, and
# worker_pods_pending_timeout are now handled by a single config (task_queued_timeout),
# we can't deprecate them as we normally would. So, we'll read each config and take
# the max value in order to ensure we're not undercutting a legitimate
# use of any of these configs.
stalled_task_timeout = conf.getfloat("celery", "stalled_task_timeout", fallback=0)
if stalled_task_timeout:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The '[celery] stalled_task_timeout' config option is deprecated. "
"Please update your config to use '[scheduler] task_queued_timeout' instead.",
DeprecationWarning,
)
task_adoption_timeout = conf.getfloat("celery", "task_adoption_timeout", fallback=0)
if task_adoption_timeout:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The '[celery] task_adoption_timeout' config option is deprecated. "
"Please update your config to use '[scheduler] task_queued_timeout' instead.",
DeprecationWarning,
)
worker_pods_pending_timeout = conf.getfloat(
"kubernetes_executor", "worker_pods_pending_timeout", fallback=0
)
if worker_pods_pending_timeout:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The '[kubernetes_executor] worker_pods_pending_timeout' config option is deprecated. "
"Please update your config to use '[scheduler] task_queued_timeout' instead.",
DeprecationWarning,
)
task_queued_timeout = conf.getfloat("scheduler", "task_queued_timeout")
self._task_queued_timeout = max(
stalled_task_timeout, task_adoption_timeout, worker_pods_pending_timeout, task_queued_timeout
)
self.do_pickle = do_pickle
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get_mandatory_value("database", "sql_alchemy_conn").lower()
self.using_sqlite = sql_conn.startswith("sqlite")
# Dag Processor agent - not used in Dag Processor standalone mode.
self.processor_agent: DagFileProcessorAgent | None = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
self._paused_dag_without_running_dagruns: set = set()
@provide_session
def heartbeat_callback(self, session: Session = NEW_SESSION) -> None:
Stats.incr("scheduler_heartbeat", 1, 1)
def register_signals(self) -> None:
"""Register signals that stop child processes."""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum: int, frame: FrameType | None) -> None:
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
if not _is_parent_process():
# Only the parent process should perform the cleanup.
return
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum: int, frame: FrameType | None) -> None:
if not _is_parent_process():
# Only the parent process should perform the debug dump.
return
try:
sig_name = signal.Signals(signum).name
except Exception:
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.job.executor.debug_dump()
self.log.info("-" * 80)
def __get_concurrency_maps(self, states: Iterable[TaskInstanceState], session: Session) -> ConcurrencyMap:
"""
Get the concurrency maps.
:param states: List of states to query for
:return: Concurrency map
"""
ti_concurrency_query: Result = session.execute(
select(TI.task_id, TI.run_id, TI.dag_id, func.count("*"))
.where(TI.state.in_(states))
.group_by(TI.task_id, TI.run_id, TI.dag_id)
)
return ConcurrencyMap.from_concurrency_map(
{(dag_id, run_id, task_id): count for task_id, run_id, dag_id, count in ti_concurrency_query}
)
def _executable_task_instances_to_queued(self, max_tis: int, session: Session) -> list[TI]:
"""
Find TIs that are ready for execution based on conditions.
Conditions include:
- pool limits
- DAG max_active_tasks
- executor state
- priority
- max active tis per DAG
- max active tis per DAG run
:param max_tis: Maximum number of TIs to queue in this loop.
:return: list[airflow.models.TaskInstance]
"""
from airflow.models.pool import Pool
from airflow.utils.db import DBLocks
executable_tis: list[TI] = []
if session.get_bind().dialect.name == "postgresql":
# Optimization: to avoid littering the DB errors of "ERROR: canceling statement due to lock
# timeout", try to take out a transactional advisory lock (unlocks automatically on
# COMMIT/ROLLBACK)
lock_acquired = session.execute(
text("SELECT pg_try_advisory_xact_lock(:id)").bindparams(
id=DBLocks.SCHEDULER_CRITICAL_SECTION.value
)
).scalar()
if not lock_acquired:
# Throw an error like the one that would happen with NOWAIT
raise OperationalError(
"Failed to acquire advisory lock", params=None, orig=RuntimeError("55P03")
)
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = sum(max(0, pool["open"]) for pool in pools.values())
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return []
max_tis = min(max_tis, pool_slots_free)
starved_pools = {pool_name for pool_name, stats in pools.items() if stats["open"] <= 0}
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
concurrency_map = self.__get_concurrency_maps(states=EXECUTION_STATES, session=session)
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# dag and task ids that can't be queued because of concurrency limits
starved_dags: set[str] = set()
starved_tasks: set[tuple[str, str]] = set()
starved_tasks_task_dagrun_concurrency: set[tuple[str, str, str]] = set()
pool_num_starving_tasks: dict[str, int] = Counter()
for loop_count in itertools.count(start=1):
num_starved_pools = len(starved_pools)
num_starved_dags = len(starved_dags)
num_starved_tasks = len(starved_tasks)
num_starved_tasks_task_dagrun_concurrency = len(starved_tasks_task_dagrun_concurrency)
# Get task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
select(TI)
.with_hint(TI, "USE INDEX (ti_state)", dialect_name="mysql")
.join(TI.dag_run)
.where(DR.run_type != DagRunType.BACKFILL_JOB, DR.state == DagRunState.RUNNING)
.join(TI.dag_model)
.where(not_(DM.is_paused))
.where(TI.state == TaskInstanceState.SCHEDULED)
.options(selectinload(TI.dag_model))
.order_by(-TI.priority_weight, DR.execution_date, TI.map_index)
)
if starved_pools:
query = query.where(not_(TI.pool.in_(starved_pools)))
if starved_dags:
query = query.where(not_(TI.dag_id.in_(starved_dags)))
if starved_tasks:
task_filter = tuple_in_condition((TI.dag_id, TI.task_id), starved_tasks)
query = query.where(not_(task_filter))
if starved_tasks_task_dagrun_concurrency:
task_filter = tuple_in_condition(
(TI.dag_id, TI.run_id, TI.task_id),
starved_tasks_task_dagrun_concurrency,
)
query = query.where(not_(task_filter))
query = query.limit(max_tis)
timer = Stats.timer("scheduler.critical_section_query_duration")
timer.start()
try:
query = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
)
task_instances_to_examine: list[TI] = session.scalars(query).all()
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
raise e
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
break
# Put one task instance on each line
task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str
)
for task_instance in task_instances_to_examine:
pool_name = task_instance.pool
pool_stats = pools.get(pool_name)
if not pool_stats:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool_name)
starved_pools.add(pool_name)
continue
# Make sure to emit metrics if pool has no starving tasks
pool_num_starving_tasks.setdefault(pool_name, 0)
pool_total = pool_stats["total"]
open_slots = pool_stats["open"]
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s", open_slots, pool_name
)
# Can't schedule any more since there are no more open slots.
pool_num_starving_tasks[pool_name] += 1
num_starving_tasks_total += 1
starved_pools.add(pool_name)
continue
if task_instance.pool_slots > pool_total:
self.log.warning(
"Not executing %s. Requested pool slots (%s) are greater than "
"total pool slots: '%s' for pool: %s.",
task_instance,
task_instance.pool_slots,
pool_total,
pool_name,
)
pool_num_starving_tasks[pool_name] += 1
num_starving_tasks_total += 1
starved_tasks.add((task_instance.dag_id, task_instance.task_id))
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool_name,
)
pool_num_starving_tasks[pool_name] += 1
num_starving_tasks_total += 1
starved_tasks.add((task_instance.dag_id, task_instance.task_id))
# Though we can execute tasks with lower priority if there's enough room
continue
# Check to make sure that the task max_active_tasks of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_active_tasks_per_dag = concurrency_map.dag_active_tasks_map[dag_id]
max_active_tasks_per_dag_limit = task_instance.dag_model.max_active_tasks
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_active_tasks_per_dag,
max_active_tasks_per_dag_limit,
)
if current_active_tasks_per_dag >= max_active_tasks_per_dag_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's max_active_tasks limit of %s",
task_instance,
dag_id,
max_active_tasks_per_dag_limit,
)
starved_dags.add(dag_id)
continue
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so the more often we can avoid loading the
# full serialized DAG, the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
# If the dag is missing, fail the task and continue to the next task.
if not serialized_dag:
self.log.error(
"DAG '%s' for task instance %s not found in serialized_dag table",
dag_id,
task_instance,
)
session.execute(
update(TI)
.where(TI.dag_id == dag_id, TI.state == TaskInstanceState.SCHEDULED)
.values(state=TaskInstanceState.FAILED)
.execution_options(synchronize_session="fetch")
)
continue
task_concurrency_limit: int | None = None
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).max_active_tis_per_dag
if task_concurrency_limit is not None:
current_task_concurrency = concurrency_map.task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
starved_tasks.add((task_instance.dag_id, task_instance.task_id))
continue
task_dagrun_concurrency_limit: int | None = None
if serialized_dag.has_task(task_instance.task_id):
task_dagrun_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).max_active_tis_per_dagrun
if task_dagrun_concurrency_limit is not None:
current_task_dagrun_concurrency = concurrency_map.task_dagrun_concurrency_map[
(task_instance.dag_id, task_instance.run_id, task_instance.task_id)
]
if current_task_dagrun_concurrency >= task_dagrun_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency per DAG run for"
" this task has been reached.",
task_instance,
)
starved_tasks_task_dagrun_concurrency.add(
(task_instance.dag_id, task_instance.run_id, task_instance.task_id)
)
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
concurrency_map.dag_active_tasks_map[dag_id] += 1
concurrency_map.task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
concurrency_map.task_dagrun_concurrency_map[
(task_instance.dag_id, task_instance.run_id, task_instance.task_id)
] += 1
pool_stats["open"] = open_slots
is_done = executable_tis or len(task_instances_to_examine) < max_tis
# Check this to avoid accidental infinite loops
found_new_filters = (
len(starved_pools) > num_starved_pools
or len(starved_dags) > num_starved_dags
or len(starved_tasks) > num_starved_tasks
or len(starved_tasks_task_dagrun_concurrency) > num_starved_tasks_task_dagrun_concurrency
)
if is_done or not found_new_filters:
break
self.log.info(
"Found no task instances to queue on query iteration %s "
"but there could be more candidate task instances to check.",
loop_count,
)
for pool_name, num_starving_tasks in pool_num_starving_tasks.items():
Stats.gauge(f"pool.starving_tasks.{pool_name}", num_starving_tasks)
# Same metric with tagging
Stats.gauge("pool.starving_tasks", num_starving_tasks, tags={"pool_name": pool_name})
Stats.gauge("scheduler.tasks.starving", num_starving_tasks_total)
Stats.gauge("scheduler.tasks.executable", len(executable_tis))
if len(executable_tis) > 0:
task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.execute(
update(TI)
.where(filter_for_tis)
.values(
# TODO[ha]: should we use func.now()? How does that work with DB timezone
# on mysql when it's not UTC?
state=TaskInstanceState.QUEUED,
queued_dttm=timezone.utcnow(),
queued_by_job_id=self.job.id,
)
.execution_options(synchronize_session=False)
)
for ti in executable_tis:
ti.emit_state_change_metric(TaskInstanceState.QUEUED)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: list[TI], session: Session) -> None:
"""
Enqueue task_instances which should have been set to queued with the executor.
:param task_instances: TaskInstances to enqueue
:param session: The session object
"""
# actually enqueue them
for ti in task_instances:
if ti.dag_run.state in State.finished_dr_states:
ti.set_state(None, session=session)
continue
command = ti.command_as_list(
local=True,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.job.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_enqueue_task_instances(self, session: Session) -> int:
"""
Enqueues TaskInstances for execution.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section", meaning that only a single scheduler process can
execute it at a time. This is achieved by doing ``SELECT ... FROM pool FOR UPDATE``. For DBs that
support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); on DBs that don't support this (such as
MariaDB or MySQL 5.x) the other schedulers will wait for the lock before continuing.
:param session:
:return: Number of task instances whose state was changed.
"""
if self.job.max_tis_per_query == 0:
max_tis = self.job.executor.slots_available
else:
max_tis = min(self.job.max_tis_per_query, self.job.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis, session=session)
return len(queued_tis)
def _process_executor_events(self, session: Session) -> int:
"""Respond to executor events."""
if not self._standalone_dag_processor and not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: dict[tuple[str, str, str, int], int] = {}
event_buffer = self.job.executor.get_event_buffer()
tis_with_right_state: list[TaskInstanceKey] = []
# Report execution
for ti_key, (state, _) in event_buffer.items():
# We create a map (dag_id, task_id, run_id, map_index) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info("Received executor event with state %s for task instance %s", state, ti_key)
if state in (TaskInstanceState.FAILED, TaskInstanceState.SUCCESS, TaskInstanceState.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
query = select(TI).where(filter_for_tis).options(selectinload(TI.dag_model))
# row lock this entire set of taskinstances to make sure the scheduler doesn't fail when we have
# multi-schedulers
tis: Iterator[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
)
tis = session.scalars(tis)
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
if state == TaskInstanceState.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
msg = (
"TaskInstance Finished: dag_id=%s, task_id=%s, run_id=%s, map_index=%s, "
"run_start_date=%s, run_end_date=%s, "
"run_duration=%s, state=%s, executor_state=%s, try_number=%s, max_tries=%s, job_id=%s, "
"pool=%s, queue=%s, priority_weight=%d, operator=%s, queued_dttm=%s, "
"queued_by_job_id=%s, pid=%s"
)
self.log.info(
msg,
ti.dag_id,
ti.task_id,
ti.run_id,
ti.map_index,
ti.start_date,
ti.end_date,
ti.duration,
ti.state,
state,
try_number,
ti.max_tries,
ti.job_id,
ti.pool,
ti.queue,
ti.priority_weight,
ti.operator,
ti.queued_dttm,
ti.queued_by_job_id,
ti.pid,
)
# There are two scenarios why the same TI with the same try_number is queued
# after executor is finished with it:
# 1) the TI was killed externally and it had no time to mark itself failed
# - in this case we should mark it as failed here.
# 2) the TI has been requeued after getting deferred - in this case either our executor has it
# or the TI is queued by another job. Either ways we should not fail it.
# All of this could also happen if the state is "running",
# but that is handled by the zombie detection.
ti_queued = ti.try_number == buffer_key.try_number and ti.state == TaskInstanceState.QUEUED
ti_requeued = (
ti.queued_by_job_id != self.job.id # Another scheduler has queued this task again
or self.job.executor.has_task(ti) # This scheduler has this task already
)
if ti_queued and not ti_requeued:
Stats.incr(
"scheduler.tasks.killed_externally",
tags={"dag_id": ti.dag_id, "task_id": ti.task_id},
)
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says it's %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
# Get task from the Serialized DAG
try:
dag = self.dagbag.get_dag(ti.dag_id)
task = dag.get_task(ti.task_id)
except Exception:
self.log.exception("Marking task instance %s as %s", ti, state)
ti.set_state(state)
continue
ti.task = task
if task.on_retry_callback or task.on_failure_callback:
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance.from_ti(ti),
msg=msg % (ti, state, ti.state, info),
processor_subdir=ti.dag_model.processor_subdir,
)
self.job.executor.send_callback(request)
else:
ti.handle_failure(error=msg % (ti, state, ti.state, info), session=session)
return len(event_buffer)
def _execute(self) -> int | None:
from airflow.dag_processing.manager import DagFileProcessorAgent
self.log.info("Starting the scheduler")
executor_class, _ = ExecutorLoader.import_default_executor_cls()
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and executor_class.supports_pickling
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint("core", "dag_file_processor_timeout")
processor_timeout = timedelta(seconds=processor_timeout_seconds)
if not self._standalone_dag_processor:
self.processor_agent = DagFileProcessorAgent(
dag_directory=Path(self.subdir),
max_runs=self.num_times_parse_dags,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.job.executor.job_id = self.job.id
if self.processor_agent:
self.log.debug("Using PipeCallbackSink as callback sink.")
self.job.executor.callback_sink = PipeCallbackSink(
get_sink_pipe=self.processor_agent.get_callbacks_pipe
)
else:
from airflow.callbacks.database_callback_sink import DatabaseCallbackSink
self.log.debug("Using DatabaseCallbackSink as callback sink.")
self.job.executor.callback_sink = DatabaseCallbackSink()
self.job.executor.start()
self.register_signals()
if self.processor_agent:
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
if self.processor_agent:
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
DAG.deactivate_stale_dags(execute_start_time)
settings.Session.remove() # type: ignore
except Exception:
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
raise
finally:
try:
self.job.executor.end()
except Exception:
self.log.exception("Exception when executing Executor.end")
if self.processor_agent:
try:
self.processor_agent.end()
except Exception:
self.log.exception("Exception when executing DagFileProcessorAgent.end")
self.log.info("Exited execute loop")
return None
@provide_session
def _update_dag_run_state_for_paused_dags(self, session: Session = NEW_SESSION) -> None:
try:
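# Select running, non-backfill runs of paused DAGs whose task instances were updated at or
# after the run's last scheduling decision, so their final run state can still be resolved.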
paused_runs = session.scalars(
select(DagRun)
.join(DagRun.dag_model)
.join(TI)
.where(
DagModel.is_paused == expression.true(),
DagRun.state == DagRunState.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
)
.having(DagRun.last_scheduling_decision <= func.max(TI.updated_at))
.group_by(DagRun)
)
for dag_run in paused_runs:
dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if dag is None:
continue
dag_run.dag = dag
_, callback_to_run = dag_run.update_state(execute_callbacks=False, session=session)
if callback_to_run:
self._send_dag_callbacks_to_processor(dag, callback_to_run)
except Exception as e: # should not fail the scheduler
self.log.exception("Failed to update dag run state for paused dags due to %s", str(e))
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop.
The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
"""
if not self.processor_agent and not self._standalone_dag_processor:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean("core", "unit_test_mode")
timers = EventScheduler()
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
timers.call_regular_interval(
conf.getfloat("scheduler", "orphaned_tasks_check_interval", fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
timers.call_regular_interval(
conf.getfloat("scheduler", "trigger_timeout_check_interval", fallback=15.0),
self.check_trigger_timeouts,
)
timers.call_regular_interval(
conf.getfloat("scheduler", "pool_metrics_interval", fallback=5.0),
self._emit_pool_metrics,
)
timers.call_regular_interval(
conf.getfloat("scheduler", "zombie_detection_interval", fallback=10.0),
self._find_zombies,
)
timers.call_regular_interval(60.0, self._update_dag_run_state_for_paused_dags)
timers.call_regular_interval(
conf.getfloat("scheduler", "task_queued_timeout_check_interval"),
self._fail_tasks_stuck_in_queued,
)
timers.call_regular_interval(
conf.getfloat("scheduler", "parsing_cleanup_interval"),
self._orphan_unreferenced_datasets,
)
if self._standalone_dag_processor:
timers.call_regular_interval(
conf.getfloat("scheduler", "parsing_cleanup_interval"),
self._cleanup_stale_dags,
)
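# All of the housekeeping registered above runs on the EventScheduler; timers.run() inside
# the main loop below fires whichever callbacks are due on each iteration.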
for loop_count in itertools.count(start=1):
with Stats.timer("scheduler.scheduler_loop_duration") as timer:
if self.using_sqlite and self.processor_agent:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.job.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
if self.processor_agent:
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
perform_heartbeat(
job=self.job, heartbeat_callback=self.heartbeat_callback, only_if_necessary=True
)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._scheduler_idle_sleep_time, next_event if next_event else 0))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent and self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
def _do_scheduling(self, session: Session) -> int:
"""
This function is where the main scheduling decisions take place.
It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
We don't select all dagruns at once, because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_enqueue_task_instances for more.
:return: Number of TIs enqueued in this iteration
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
self._start_queued_dagruns(session)
guard.commit()
dag_runs = self._get_next_dagruns_to_examine(DagRunState.RUNNING, session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
callback_tuples = self._schedule_all_dag_runs(guard, dag_runs, session)
# Send the callbacks after we commit to ensure the context is up to date when it gets run
# cache saves time during scheduling of many dag_runs for same dag
cached_get_dag: Callable[[str], DAG | None] = lru_cache()(
partial(self.dagbag.get_dag, session=session)
)
for dag_run, callback_to_run in callback_tuples:
dag = cached_get_dag(dag_run.dag_id)
if not dag:
self.log.error("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
# Sending callbacks there as in standalone_dag_processor they are adding to the database,
# so it must be done outside of prohibit_commit.
self._send_dag_callbacks_to_processor(dag, callback_to_run)
with prohibit_commit(session) as guard:
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
if self.job.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
num_queued_tis = 0
else:
try:
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_enqueue_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
# metric way down.
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
@retry_db_transaction
def _get_next_dagruns_to_examine(self, state: DagRunState, session: Session) -> Query:
"""Get Next DagRuns to Examine with retries."""
return DagRun.next_dagruns_to_examine(state, session)
@retry_db_transaction
def _create_dagruns_for_dags(self, guard: CommitProhibitorGuard, session: Session) -> None:
"""Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError."""
query, dataset_triggered_dag_info = DagModel.dags_needing_dagruns(session)
all_dags_needing_dag_runs = set(query.all())
dataset_triggered_dags = [
dag for dag in all_dags_needing_dag_runs if dag.dag_id in dataset_triggered_dag_info
]
non_dataset_dags = all_dags_needing_dag_runs.difference(dataset_triggered_dags)
self._create_dag_runs(non_dataset_dags, session)
if dataset_triggered_dags:
self._create_dag_runs_dataset_triggered(
dataset_triggered_dags, dataset_triggered_dag_info, session
)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
def _create_dag_runs(self, dag_models: Collection[DagModel], session: Session) -> None:
"""Create a DAG run and update the dag_model to control if/when the next DAGRun should be created."""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exist so that we don't attempt to create
# duplicate dag runs
existing_dagruns = (
session.execute(
select(DagRun.dag_id, DagRun.execution_date).where(
tuple_in_condition(
(DagRun.dag_id, DagRun.execution_date),
((dm.dag_id, dm.next_dagrun) for dm in dag_models),
),
)
)
.unique()
.all()
)
active_runs_of_dags = Counter(
DagRun.active_runs_of_dags(dag_ids=(dm.dag_id for dm in dag_models), session=session),
)
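# active_runs_of_dags counts the currently active runs per dag so that, after creating a run,
# the scheduler can decide whether to advance the next_dagrun fields (max_active_runs check).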
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
if not dag:
self.log.error("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
data_interval = dag.get_next_data_interval(dag_model)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check DagRun existence instead
# of catching an IntegrityError and rolling back the session, i.e. we need to set
# dag.next_dagrun_info whether the Dag Run already exists or we create a new one.
# This is so that in the next scheduling loop we try to create new runs
# instead of falling into a loop of IntegrityError.
if (dag.dag_id, dag_model.next_dagrun) not in existing_dagruns:
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
state=DagRunState.QUEUED,
data_interval=data_interval,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.job.id,
)
active_runs_of_dags[dag.dag_id] += 1
if self._should_update_dag_next_dagruns(
dag, dag_model, active_runs_of_dags[dag.dag_id], session=session
):
dag_model.calculate_dagrun_date_fields(dag, data_interval)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _create_dag_runs_dataset_triggered(
self,
dag_models: Collection[DagModel],
dataset_triggered_dag_info: dict[str, tuple[datetime, datetime]],
session: Session,
) -> None:
"""For DAGs that are triggered by datasets, create dag runs."""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exist so that we don't attempt to create
# duplicate dag runs
exec_dates = {
dag_id: timezone.coerce_datetime(last_time)
for dag_id, (_, last_time) in dataset_triggered_dag_info.items()
}
existing_dagruns: set[tuple[str, timezone.DateTime]] = set(
session.execute(
select(DagRun.dag_id, DagRun.execution_date).where(
tuple_in_condition((DagRun.dag_id, DagRun.execution_date), exec_dates.items())
)
)
)
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
if not dag:
self.log.error("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
if not isinstance(dag.timetable, DatasetTriggeredTimetable):
self.log.error(
"DAG '%s' was dataset-scheduled, but didn't have a DatasetTriggeredTimetable!",
dag_model.dag_id,
)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check DagRun existence instead
# of catching an IntegrityError and rolling back the session, i.e. we need to set
# dag.next_dagrun_info whether the Dag Run already exists or we create a new one.
# This is so that in the next scheduling loop we try to create new runs
# instead of falling into a loop of IntegrityError.
exec_date = exec_dates[dag.dag_id]
if (dag.dag_id, exec_date) not in existing_dagruns:
previous_dag_run = session.scalar(
select(DagRun)
.where(
DagRun.dag_id == dag.dag_id,
DagRun.execution_date < exec_date,
DagRun.run_type == DagRunType.DATASET_TRIGGERED,
)
.order_by(DagRun.execution_date.desc())
.limit(1)
)
dataset_event_filters = [
DagScheduleDatasetReference.dag_id == dag.dag_id,
DatasetEvent.timestamp <= exec_date,
]
if previous_dag_run:
dataset_event_filters.append(DatasetEvent.timestamp > previous_dag_run.execution_date)
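# Only dataset events newer than the previous dataset-triggered run (if any) and no later
# than the chosen execution date are attached to the new run below.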
dataset_events = session.scalars(
select(DatasetEvent)
.join(
DagScheduleDatasetReference,
DatasetEvent.dataset_id == DagScheduleDatasetReference.dataset_id,
)
.join(DatasetEvent.source_dag_run)
.where(*dataset_event_filters)
).all()
data_interval = dag.timetable.data_interval_for_events(exec_date, dataset_events)
run_id = dag.timetable.generate_run_id(
run_type=DagRunType.DATASET_TRIGGERED,
logical_date=exec_date,
data_interval=data_interval,
session=session,
events=dataset_events,
)
dag_run = dag.create_dagrun(
run_id=run_id,
run_type=DagRunType.DATASET_TRIGGERED,
execution_date=exec_date,
data_interval=data_interval,
state=DagRunState.QUEUED,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.job.id,
)
Stats.incr("dataset.triggered_dagruns")
dag_run.consumed_dataset_events.extend(dataset_events)
session.execute(
delete(DatasetDagRunQueue).where(DatasetDagRunQueue.target_dag_id == dag_run.dag_id)
)
def _should_update_dag_next_dagruns(
self, dag: DAG, dag_model: DagModel, total_active_runs: int | None = None, *, session: Session
) -> bool:
"""Check if the dag's next_dagruns_create_after should be updated."""
# If the DAG never schedules, skip the check to save runtime.
if not dag.timetable.can_be_scheduled:
return False
# get active dag runs from DB if not available
if not total_active_runs:
total_active_runs = dag.get_num_active_runs(only_running=False, session=session)
if total_active_runs and total_active_runs >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag_model.dag_id,
total_active_runs,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
return False
return True
def _start_queued_dagruns(self, session: Session) -> None:
"""Find DagRuns in queued state and decide moving them to running state."""
# added all() to save runtime, otherwise query is executed more than once
dag_runs: Collection[DagRun] = self._get_next_dagruns_to_examine(DagRunState.QUEUED, session).all()
active_runs_of_dags = Counter(
DagRun.active_runs_of_dags((dr.dag_id for dr in dag_runs), only_running=True, session=session),
)
def _update_state(dag: DAG, dag_run: DagRun):
dag_run.state = DagRunState.RUNNING
dag_run.start_date = timezone.utcnow()
if dag.timetable.periodic:
# TODO: Logically, this should be DagRunInfo.run_after, but the
# information is not stored on a DagRun, only before the actual
# execution on DagModel.next_dagrun_create_after. We should add
# a field on DagRun for this instead of relying on the run
# always happening immediately after the data interval.
expected_start_date = dag.get_run_data_interval(dag_run).end
schedule_delay = dag_run.start_date - expected_start_date
# Publish metrics twice with backward compatible name, and then with tags
Stats.timing(f"dagrun.schedule_delay.{dag.dag_id}", schedule_delay)
Stats.timing(
"dagrun.schedule_delay",
schedule_delay,
tags={"dag_id": dag.dag_id},
)
# cache saves time during scheduling of many dag_runs for same dag
cached_get_dag: Callable[[str], DAG | None] = lru_cache()(
partial(self.dagbag.get_dag, session=session)
)
for dag_run in dag_runs:
dag = dag_run.dag = cached_get_dag(dag_run.dag_id)
if not dag:
self.log.error("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
active_runs = active_runs_of_dags[dag_run.dag_id]
if dag.max_active_runs and active_runs >= dag.max_active_runs:
self.log.debug(
"DAG %s already has %d active runs, not moving any more runs to RUNNING state %s",
dag.dag_id,
active_runs,
dag_run.execution_date,
)
else:
active_runs_of_dags[dag_run.dag_id] += 1
_update_state(dag, dag_run)
dag_run.notify_dagrun_state_changed()
@retry_db_transaction
def _schedule_all_dag_runs(
self,
guard: CommitProhibitorGuard,
dag_runs: Iterable[DagRun],
session: Session,
) -> list[tuple[DagRun, DagCallbackRequest | None]]:
"""Makes scheduling decisions for all `dag_runs`."""
callback_tuples = [(run, self._schedule_dag_run(run, session=session)) for run in dag_runs]
guard.commit()
return callback_tuples
def _schedule_dag_run(
self,
dag_run: DagRun,
session: Session,
) -> DagCallbackRequest | None:
"""
Make scheduling decisions about an individual dag run.
:param dag_run: The DagRun to schedule
:return: Callback that needs to be executed
"""
callback: DagCallbackRequest | None = None
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
dag_model = DM.get_dagmodel(dag_run.dag_id, session)
if not dag or not dag_model:
self.log.error("Couldn't find DAG %s in DAG bag or database!", dag_run.dag_id)
return callback
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
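# The run has exceeded dagrun_timeout: fail it, skip its unfinished task instances, and
# return a failure callback (msg="timed_out") for the DAG file processor to execute.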
dag_run.set_state(DagRunState.FAILED)
unfinished_task_instances = session.scalars(
select(TI)
.where(TI.dag_id == dag_run.dag_id)
.where(TI.run_id == dag_run.run_id)
.where(TI.state.in_(State.unfinished))
)
for task_instance in unfinished_task_instances:
task_instance.state = TaskInstanceState.SKIPPED
session.merge(task_instance)
session.flush()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
# Work out if we should allow creating a new DagRun now?
if self._should_update_dag_next_dagruns(dag, dag_model, session=session):
dag_model.calculate_dagrun_date_fields(dag, dag.get_run_data_interval(dag_run))
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
run_id=dag_run.run_id,
is_failure_callback=True,
processor_subdir=dag_model.processor_subdir,
msg="timed_out",
)
dag_run.notify_dagrun_state_changed()
duration = dag_run.end_date - dag_run.start_date
Stats.timing(f"dagrun.duration.failed.{dag_run.dag_id}", duration)
Stats.timing("dagrun.duration.failed", duration, tags={"dag_id": dag_run.dag_id})
return callback_to_execute
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return callback
if not self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session):
self.log.warning("The DAG disappeared before verifying integrity: %s. Skipping.", dag_run.dag_id)
return callback
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
        # If the DAG run is not yet finished, skip the next-dagrun calculation to save scheduler runtime
if dag_run.state in State.finished_dr_states:
            # Work out whether we should allow creating a new DagRun now.
if self._should_update_dag_next_dagruns(dag, dag_model, session=session):
dag_model.calculate_dagrun_date_fields(dag, dag.get_run_data_interval(dag_run))
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
dag_run.schedule_tis(schedulable_tis, session, max_tis_per_query=self.job.max_tis_per_query)
return callback_to_run
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session: Session) -> bool:
"""
        Only run DagRun.verify_integrity if the Serialized DAG has changed, since it is slow.
        Return True if we determine that the DAG still exists.
"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return True
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
if not dag_run.dag:
return False
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
return True
def _send_dag_callbacks_to_processor(self, dag: DAG, callback: DagCallbackRequest | None = None) -> None:
self._send_sla_callbacks_to_processor(dag)
if callback:
self.job.executor.send_callback(callback)
else:
self.log.debug("callback is empty")
def _send_sla_callbacks_to_processor(self, dag: DAG) -> None:
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True."""
if not settings.CHECK_SLAS:
return
if not any(isinstance(task.sla, timedelta) for task in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not dag.timetable.periodic:
self.log.debug("Skipping SLA check for %s because DAG is not scheduled", dag)
return
dag_model = DagModel.get_dagmodel(dag.dag_id)
if not dag_model:
self.log.error("Couldn't find DAG %s in database!", dag.dag_id)
return
request = SlaCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
processor_subdir=dag_model.processor_subdir,
)
self.job.executor.send_callback(request)
@provide_session
def _fail_tasks_stuck_in_queued(self, session: Session = NEW_SESSION) -> None:
"""
Mark tasks stuck in queued for longer than `task_queued_timeout` as failed.
Tasks can get stuck in queued for a wide variety of reasons (e.g. celery loses
track of a task, a cluster can't further scale up its workers, etc.), but tasks
should not be stuck in queued for a long time. This will mark tasks stuck in
queued for longer than `self._task_queued_timeout` as failed. If the task has
available retries, it will be retried.
"""
self.log.debug("Calling SchedulerJob._fail_tasks_stuck_in_queued method")
tasks_stuck_in_queued = session.scalars(
select(TI).where(
TI.state == TaskInstanceState.QUEUED,
TI.queued_dttm < (timezone.utcnow() - timedelta(seconds=self._task_queued_timeout)),
TI.queued_by_job_id == self.job.id,
)
).all()
try:
tis_for_warning_message = self.job.executor.cleanup_stuck_queued_tasks(tis=tasks_stuck_in_queued)
if tis_for_warning_message:
task_instance_str = "\n\t".join(tis_for_warning_message)
self.log.warning(
"Marked the following %s task instances stuck in queued as failed. "
"If the task instance has available retries, it will be retried.\n\t%s",
len(tasks_stuck_in_queued),
task_instance_str,
)
except NotImplementedError:
self.log.debug("Executor doesn't support cleanup of stuck queued tasks. Skipping.")
...
@provide_session
def _emit_pool_metrics(self, session: Session = NEW_SESSION) -> None:
from airflow.models.pool import Pool
pools = Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f"pool.open_slots.{pool_name}", slot_stats["open"])
Stats.gauge(f"pool.queued_slots.{pool_name}", slot_stats["queued"])
Stats.gauge(f"pool.running_slots.{pool_name}", slot_stats["running"])
# Same metrics with tagging
Stats.gauge("pool.open_slots", slot_stats["open"], tags={"pool_name": pool_name})
Stats.gauge("pool.queued_slots", slot_stats["queued"], tags={"pool_name": pool_name})
Stats.gauge("pool.running_slots", slot_stats["running"], tags={"pool_name": pool_name})
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = NEW_SESSION) -> int:
"""
        Adopt or reset any TaskInstance in QUEUED or RUNNING state if its SchedulerJob is no longer running.
:return: the number of TIs reset
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = session.execute(
update(Job)
.where(
Job.job_type == "SchedulerJob",
Job.state == JobState.RUNNING,
Job.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.values(state=JobState.FAILED)
).rowcount
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
resettable_states = [TaskInstanceState.QUEUED, TaskInstanceState.RUNNING]
query = (
select(TI)
.where(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.where(or_(TI.queued_by_job_id.is_(None), Job.state != JobState.RUNNING))
.join(TI.dag_run)
.where(
DagRun.run_type != DagRunType.BACKFILL_JOB,
DagRun.state == DagRunState.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.run_id))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
)
tis_to_reset_or_adopt = session.scalars(tis_to_reset_or_adopt).all()
to_reset = self.job.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = None
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.job.id
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr("scheduler.orphaned_tasks.adopted", len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session
                    # commit (or, if passed a session, let the caller decide when to commit)
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
@provide_session
def check_trigger_timeouts(self, session: Session = NEW_SESSION) -> None:
"""Mark any "deferred" task as failed if the trigger or execution timeout has passed."""
num_timed_out_tasks = session.execute(
update(TI)
.where(
TI.state == TaskInstanceState.DEFERRED,
TI.trigger_timeout < timezone.utcnow(),
)
.values(
state=TaskInstanceState.SCHEDULED,
next_method="__fail__",
next_kwargs={"error": "Trigger/execution timeout"},
trigger_id=None,
)
).rowcount
if num_timed_out_tasks:
self.log.info("Timed out %i deferred tasks without fired triggers", num_timed_out_tasks)
def _find_zombies(self) -> None:
"""
Find zombie task instances and create a TaskCallbackRequest to be handled by the DAG processor.
        Zombie instances are tasks that haven't heartbeated for too long or whose LocalTaskJob is no longer running.
"""
from airflow.jobs.job import Job
self.log.debug("Finding 'running' jobs without a recent heartbeat")
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
with create_session() as session:
zombies: list[tuple[TI, str, str]] = (
session.execute(
select(TI, DM.fileloc, DM.processor_subdir)
.with_hint(TI, "USE INDEX (ti_state)", dialect_name="mysql")
.join(Job, TI.job_id == Job.id)
.join(DM, TI.dag_id == DM.dag_id)
.where(TI.state == TaskInstanceState.RUNNING)
.where(
or_(
Job.state != JobState.RUNNING,
Job.latest_heartbeat < limit_dttm,
)
)
.where(Job.job_type == "LocalTaskJob")
.where(TI.queued_by_job_id == self.job.id)
)
.unique()
.all()
)
if zombies:
self.log.warning("Failing (%s) jobs without heartbeat after %s", len(zombies), limit_dttm)
for ti, file_loc, processor_subdir in zombies:
zombie_message_details = self._generate_zombie_message_details(ti)
request = TaskCallbackRequest(
full_filepath=file_loc,
processor_subdir=processor_subdir,
simple_task_instance=SimpleTaskInstance.from_ti(ti),
msg=str(zombie_message_details),
)
self.log.error("Detected zombie job: %s", request)
self.job.executor.send_callback(request)
Stats.incr("zombies_killed", tags={"dag_id": ti.dag_id, "task_id": ti.task_id})
@staticmethod
def _generate_zombie_message_details(ti: TI) -> dict[str, Any]:
zombie_message_details = {
"DAG Id": ti.dag_id,
"Task Id": ti.task_id,
"Run Id": ti.run_id,
}
if ti.map_index != -1:
zombie_message_details["Map Index"] = ti.map_index
if ti.hostname:
zombie_message_details["Hostname"] = ti.hostname
if ti.external_executor_id:
zombie_message_details["External Executor Id"] = ti.external_executor_id
return zombie_message_details
@provide_session
def _cleanup_stale_dags(self, session: Session = NEW_SESSION) -> None:
"""
Find all dags that were not updated by Dag Processor recently and mark them as inactive.
        If one of the DagProcessors is stopped (when there are multiple of them
        for different dag folders), its dags would otherwise never be marked as inactive.
        Also remove their dags from the SerializedDag table.
Executed on schedule only if [scheduler]standalone_dag_processor is True.
"""
self.log.debug("Checking dags not parsed within last %s seconds.", self._dag_stale_not_seen_duration)
limit_lpt = timezone.utcnow() - timedelta(seconds=self._dag_stale_not_seen_duration)
stale_dags = session.scalars(
select(DagModel).where(DagModel.is_active, DagModel.last_parsed_time < limit_lpt)
).all()
if not stale_dags:
self.log.debug("Not stale dags found.")
return
self.log.info("Found (%d) stales dags not parsed after %s.", len(stale_dags), limit_lpt)
for dag in stale_dags:
dag.is_active = False
SerializedDagModel.remove_dag(dag_id=dag.dag_id, session=session)
session.flush()
def _set_orphaned(self, dataset: DatasetModel) -> int:
self.log.info("Orphaning unreferenced dataset '%s'", dataset.uri)
dataset.is_orphaned = expression.true()
return 1
@provide_session
def _orphan_unreferenced_datasets(self, session: Session = NEW_SESSION) -> None:
"""
Detect orphaned datasets and set is_orphaned flag to True.
An orphaned dataset is no longer referenced in any DAG schedule parameters or task outlets.
"""
orphaned_dataset_query = session.scalars(
select(DatasetModel)
.join(
DagScheduleDatasetReference,
isouter=True,
)
.join(
TaskOutletDatasetReference,
isouter=True,
)
# MSSQL doesn't like it when we select a column that we haven't grouped by. All other DBs let us
# group by id and select all columns.
.group_by(DatasetModel if session.get_bind().dialect.name == "mssql" else DatasetModel.id)
.having(
and_(
func.count(DagScheduleDatasetReference.dag_id) == 0,
func.count(TaskOutletDatasetReference.dag_id) == 0,
)
)
)
updated_count = sum(self._set_orphaned(dataset) for dataset in orphaned_dataset_query)
Stats.gauge("dataset.orphaned", updated_count)
| 80,789 | 43.983296 | 110 | py |
| airflow | airflow-main/airflow/jobs/triggerer_job_runner.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import logging
import os
import signal
import sys
import threading
import time
import warnings
from collections import deque
from contextlib import suppress
from copy import copy
from queue import SimpleQueue
from typing import TYPE_CHECKING
from sqlalchemy import func
from airflow.configuration import conf
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import Job, perform_heartbeat
from airflow.models.trigger import Trigger
from airflow.serialization.pydantic.job import JobPydantic
from airflow.stats import Stats
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.typing_compat import TypedDict
from airflow.utils import timezone
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.trigger_handler import (
DropTriggerLogsFilter,
LocalQueueHandler,
TriggererHandlerWrapper,
TriggerMetadataFilter,
ctx_indiv_trigger,
ctx_task_instance,
ctx_trigger_end,
ctx_trigger_id,
)
from airflow.utils.module_loading import import_string
from airflow.utils.session import provide_session
if TYPE_CHECKING:
from airflow.models import TaskInstance
HANDLER_SUPPORTS_TRIGGERER = False
"""
If this value is true, the root handler is configured to log individual trigger messages
visible in task logs.
:meta private:
"""
SEND_TRIGGER_END_MARKER = True
"""
If the handler natively supports triggers, we may want to disable sending the trigger end marker.
:meta private:
"""
logger = logging.getLogger(__name__)
DISABLE_WRAPPER = conf.getboolean("logging", "disable_trigger_handler_wrapper", fallback=False)
DISABLE_LISTENER = conf.getboolean("logging", "disable_trigger_handler_queue_listener", fallback=False)
def configure_trigger_log_handler():
"""
Configure logging where each trigger logs to its own file and can be exposed via the airflow webserver.
Generally speaking, we take the log handler configured for logger ``airflow.task``,
wrap it with TriggerHandlerWrapper, and set it as the handler for root logger.
If there already is a handler configured for the root logger and it supports triggers, we wrap it instead.
:meta private:
"""
global HANDLER_SUPPORTS_TRIGGERER
def should_wrap(handler):
return handler.__dict__.get("trigger_should_wrap", False) or handler.__class__.__dict__.get(
"trigger_should_wrap", False
)
def should_queue(handler):
return handler.__dict__.get("trigger_should_queue", True) or handler.__class__.__dict__.get(
"trigger_should_queue", True
)
def send_trigger_end_marker(handler):
val = handler.__dict__.get("trigger_send_end_marker", None)
if val is not None:
return val
val = handler.__class__.__dict__.get("trigger_send_end_marker", None)
if val is not None:
return val
return True
def supports_triggerer(handler):
return (
should_wrap(handler)
or handler.__dict__.get("trigger_supported", False)
or handler.__class__.__dict__.get("trigger_supported", False)
)
def get_task_handler_from_logger(logger_):
for h in logger_.handlers:
if isinstance(h, FileTaskHandler) and not supports_triggerer(h):
warnings.warn(
f"Handler {h.__class__.__name__} does not support "
"individual trigger logging. Please check the release notes "
"for your provider to see if a newer version supports "
"individual trigger logging."
)
if supports_triggerer(h):
return h
def find_suitable_task_handler():
# check root logger then check airflow.task to see if a handler
# suitable for use with TriggerHandlerWrapper (has trigger_should_wrap
# attr, likely inherits from FileTaskHandler)
h = get_task_handler_from_logger(root_logger)
if not h:
# try to use handler configured from airflow task
logger.debug("No task logger configured for root logger; trying `airflow.task`.")
h = get_task_handler_from_logger(logging.getLogger("airflow.task"))
if h:
logger.debug("Using logging configuration from `airflow.task`")
if not h:
warnings.warn("Could not find log handler suitable for individual trigger logging.")
return None
return h
def filter_trigger_logs_from_other_root_handlers(new_hdlr):
# we add context vars to log records emitted for individual triggerer logging
# we want these records to be processed by our special trigger handler wrapper
# but not by any other handlers, so we filter out these messages from
# other handlers by adding DropTriggerLogsFilter
# we could consider only adding this filter to the default console logger
# so as to leave other custom handlers alone
for h in root_logger.handlers:
if h is not new_hdlr:
h.addFilter(DropTriggerLogsFilter())
def add_handler_wrapper_to_root(base_handler):
# first make sure we remove from root logger if it happens to be there
# it could have come from root or airflow.task, but we only need
# to make sure we remove from root, since messages will not flow
# through airflow.task
if base_handler in root_logger.handlers:
root_logger.removeHandler(base_handler)
logger.info("Setting up TriggererHandlerWrapper with handler %s", base_handler)
h = TriggererHandlerWrapper(base_handler=base_handler, level=base_handler.level)
# just extra cautious, checking if user manually configured it there
if h not in root_logger.handlers:
root_logger.addHandler(h)
return h
root_logger = logging.getLogger()
task_handler = find_suitable_task_handler()
if not task_handler:
return None
if TYPE_CHECKING:
assert isinstance(task_handler, FileTaskHandler)
if should_wrap(task_handler):
trigger_handler = add_handler_wrapper_to_root(task_handler)
else:
trigger_handler = copy(task_handler)
root_logger.addHandler(trigger_handler)
filter_trigger_logs_from_other_root_handlers(trigger_handler)
if send_trigger_end_marker(trigger_handler) is False:
global SEND_TRIGGER_END_MARKER
SEND_TRIGGER_END_MARKER = False
HANDLER_SUPPORTS_TRIGGERER = True
return should_queue(trigger_handler)
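# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``configure_trigger_log_handler`` above detects opt-in flags by checking both
# the handler instance ``__dict__`` and the handler class ``__dict__``, so a
# flag set either way is honoured. A minimal standalone version of that lookup,
# with a hypothetical ``supports_feature`` flag:
def _example_handler_flag_lookup() -> bool:
    import logging
    class FlaggedHandler(logging.StreamHandler):
        supports_feature = True  # class-level opt-in flag
    def has_flag(handler: logging.Handler, flag: str) -> bool:
        # Instance attribute wins, otherwise fall back to the class attribute.
        return handler.__dict__.get(flag, False) or handler.__class__.__dict__.get(flag, False)
    return has_flag(FlaggedHandler(), "supports_feature")  # True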
def setup_queue_listener():
"""
Route log messages to a queue and process them with QueueListener.
Airflow task handlers make blocking I/O calls.
    We replace trigger log handlers with a LocalQueueHandler,
which sends log records to a queue.
Then we start a QueueListener in a thread, which is configured
to consume the queue and pass the records to the handlers as
originally configured. This keeps the handler I/O out of the
async event loop.
:meta private:
"""
queue = SimpleQueue()
root_logger = logging.getLogger()
handlers: list[logging.Handler] = []
queue_handler = LocalQueueHandler(queue)
queue_handler.addFilter(TriggerMetadataFilter())
root_logger.addHandler(queue_handler)
for h in root_logger.handlers[:]:
if h is not queue_handler and "pytest" not in h.__module__:
root_logger.removeHandler(h)
handlers.append(h)
this_logger = logging.getLogger(__name__)
if handlers:
this_logger.info("Setting up logging queue listener with handlers %s", handlers)
listener = logging.handlers.QueueListener(queue, *handlers, respect_handler_level=True)
listener.start()
return listener
else:
this_logger.warning("Unable to set up individual trigger logging")
return None
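# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``setup_queue_listener`` above keeps blocking handler I/O off the calling
# thread by routing records through a queue. A minimal standalone version of
# the same stdlib pattern: a QueueHandler on the emitting side and a
# QueueListener doing the actual I/O in its own thread.
def _example_queue_logging() -> None:
    import logging
    import logging.handlers
    import queue
    log_queue = queue.SimpleQueue()
    example_logger = logging.getLogger("example.queue")
    example_logger.addHandler(logging.handlers.QueueHandler(log_queue))
    listener = logging.handlers.QueueListener(
        log_queue, logging.StreamHandler(), respect_handler_level=True
    )
    listener.start()
    example_logger.warning("handled off the calling thread")
    listener.stop()  # flushes queued records before returning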
class TriggererJobRunner(BaseJobRunner["Job | JobPydantic"], LoggingMixin):
"""
    Run active triggers in asyncio and update their dependent tasks/DAGs once their events have fired.
It runs as two threads:
- The main thread does DB calls/checkins
- A subthread runs all the async code
"""
job_type = "TriggererJob"
def __init__(
self,
job: Job | JobPydantic,
capacity=None,
):
super().__init__(job)
if capacity is None:
self.capacity = conf.getint("triggerer", "default_capacity", fallback=1000)
elif isinstance(capacity, int) and capacity > 0:
self.capacity = capacity
else:
raise ValueError(f"Capacity number {capacity} is invalid")
should_queue = True
if DISABLE_WRAPPER:
self.log.warning(
"Skipping trigger log configuration; disabled by param "
"`disable_trigger_handler_wrapper=True`."
)
else:
should_queue = configure_trigger_log_handler()
self.listener = None
if DISABLE_LISTENER:
self.log.warning(
"Skipping trigger logger queue listener; disabled by param "
"`disable_trigger_handler_queue_listener=True`."
)
elif should_queue is False:
self.log.warning("Skipping trigger logger queue listener; disabled by handler setting.")
else:
self.listener = setup_queue_listener()
# Set up runner async thread
self.trigger_runner = TriggerRunner()
def register_signals(self) -> None:
"""Register signals that stop child processes."""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
@classmethod
@provide_session
def is_needed(cls, session) -> bool:
"""
Tests if the triggerer job needs to be run (i.e., if there are triggers in the trigger table).
This is used for the warning boxes in the UI.
"""
return session.query(func.count(Trigger.id)).scalar() > 0
def on_kill(self):
"""Called when there is an external kill command (via the heartbeat mechanism, for example)."""
self.trigger_runner.stop = True
def _kill_listener(self):
if self.listener:
for h in self.listener.handlers:
h.close()
self.listener.stop()
def _exit_gracefully(self, signum, frame) -> None:
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
# The first time, try to exit nicely
if not self.trigger_runner.stop:
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.trigger_runner.stop = True
self._kill_listener()
else:
self.log.warning("Forcing exit due to second exit signal %s", signum)
sys.exit(os.EX_SOFTWARE)
def _execute(self) -> int | None:
self.log.info("Starting the triggerer")
try:
# set job_id so that it can be used in log file names
self.trigger_runner.job_id = self.job.id
# Kick off runner thread
self.trigger_runner.start()
# Start our own DB loop in the main thread
self._run_trigger_loop()
except Exception:
self.log.exception("Exception when executing TriggererJobRunner._run_trigger_loop")
raise
finally:
self.log.info("Waiting for triggers to clean up")
# Tell the subthread to stop and then wait for it.
# If the user interrupts/terms again, _graceful_exit will allow them
# to force-kill here.
self.trigger_runner.stop = True
self.trigger_runner.join(30)
self.log.info("Exited trigger loop")
return None
def _run_trigger_loop(self) -> None:
"""
The main-thread trigger loop.
This runs synchronously and handles all database reads/writes.
"""
while not self.trigger_runner.stop:
if not self.trigger_runner.is_alive():
self.log.error("Trigger runner thread has died! Exiting.")
break
# Clean out unused triggers
Trigger.clean_unused()
# Load/delete triggers
self.load_triggers()
# Handle events
self.handle_events()
# Handle failed triggers
self.handle_failed_triggers()
perform_heartbeat(self.job, heartbeat_callback=self.heartbeat_callback, only_if_necessary=True)
# Collect stats
self.emit_metrics()
# Idle sleep
time.sleep(1)
def load_triggers(self):
"""Query the database for the triggers we're supposed to be running and update the runner."""
Trigger.assign_unassigned(self.job.id, self.capacity, self.job.heartrate)
ids = Trigger.ids_for_triggerer(self.job.id)
self.trigger_runner.update_triggers(set(ids))
def handle_events(self):
"""Dispatch outbound events to the Trigger model which pushes them to the relevant task instances."""
while self.trigger_runner.events:
# Get the event and its trigger ID
trigger_id, event = self.trigger_runner.events.popleft()
# Tell the model to wake up its tasks
Trigger.submit_event(trigger_id=trigger_id, event=event)
# Emit stat event
Stats.incr("triggers.succeeded")
def handle_failed_triggers(self):
"""
Handles "failed" triggers. - ones that errored or exited before they sent an event.
Task Instances that depend on them need failing.
"""
while self.trigger_runner.failed_triggers:
# Tell the model to fail this trigger's deps
trigger_id, saved_exc = self.trigger_runner.failed_triggers.popleft()
Trigger.submit_failure(trigger_id=trigger_id, exc=saved_exc)
# Emit stat event
Stats.incr("triggers.failed")
def emit_metrics(self):
Stats.gauge(f"triggers.running.{self.job.hostname}", len(self.trigger_runner.triggers))
Stats.gauge(
"triggers.running", len(self.trigger_runner.triggers), tags={"hostname": self.job.hostname}
)
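# --- Illustrative sketch, not part of the Airflow source ---------------------
# The runner above communicates between the main DB thread and the asyncio
# thread through deques: the async side appends events, the main thread drains
# them. The same producer/consumer handoff in a standalone form:
def _example_deque_handoff() -> list:
    import threading
    from collections import deque
    events: deque[str] = deque()
    def producer() -> None:
        for i in range(3):
            events.append(f"event-{i}")  # deque appends/pops are thread-safe
    worker = threading.Thread(target=producer)
    worker.start()
    worker.join()
    drained = []
    while events:
        drained.append(events.popleft())
    return drained  # ["event-0", "event-1", "event-2"]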
class TriggerDetails(TypedDict):
"""Type class for the trigger details dictionary."""
task: asyncio.Task
name: str
events: int
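# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``TriggerDetails`` is a plain dict at runtime; the TypedDict only documents
# the key/value schema for type checkers. A hypothetical entry would be built
# like this (mirroring the name format used in ``create_triggers`` below):
def _example_trigger_details() -> TriggerDetails:
    import asyncio
    async def _build() -> TriggerDetails:
        async def _noop() -> None:
            return None
        task = asyncio.create_task(_noop())
        details: TriggerDetails = {"task": task, "name": "dag/run/task/-1/1 (ID 1)", "events": 0}
        await task
        return details
    return asyncio.run(_build())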
class TriggerRunner(threading.Thread, LoggingMixin):
"""
Runtime environment for all triggers.
Mainly runs inside its own thread, where it hands control off to an asyncio
event loop, but is also sometimes interacted with from the main thread
(where all the DB queries are done). All communication between threads is
done via Deques.
"""
# Maps trigger IDs to their running tasks and other info
triggers: dict[int, TriggerDetails]
# Cache for looking up triggers by classpath
trigger_cache: dict[str, type[BaseTrigger]]
# Inbound queue of new triggers
to_create: deque[tuple[int, BaseTrigger]]
# Inbound queue of deleted triggers
to_cancel: deque[int]
# Outbound queue of events
events: deque[tuple[int, TriggerEvent]]
# Outbound queue of failed triggers
failed_triggers: deque[tuple[int, BaseException]]
# Should-we-stop flag
stop: bool = False
def __init__(self):
super().__init__()
self.triggers = {}
self.trigger_cache = {}
self.to_create = deque()
self.to_cancel = deque()
self.events = deque()
self.failed_triggers = deque()
self.job_id = None
def run(self):
"""Sync entrypoint - just runs arun in an async loop."""
asyncio.run(self.arun())
async def arun(self):
"""
Main (asynchronous) logic loop.
The loop in here runs trigger addition/deletion/cleanup. Actual
triggers run in their own separate coroutines.
"""
watchdog = asyncio.create_task(self.block_watchdog())
last_status = time.time()
while not self.stop:
try:
# Run core logic
await self.create_triggers()
await self.cancel_triggers()
await self.cleanup_finished_triggers()
# Sleep for a bit
await asyncio.sleep(1)
# Every minute, log status
if time.time() - last_status >= 60:
count = len(self.triggers)
self.log.info("%i triggers currently running", count)
last_status = time.time()
except Exception:
self.stop = True
raise
# Wait for watchdog to complete
await watchdog
async def create_triggers(self):
"""Drain the to_create queue and create all new triggers that have been requested in the DB."""
while self.to_create:
trigger_id, trigger_instance = self.to_create.popleft()
if trigger_id not in self.triggers:
task_instance: TaskInstance = trigger_instance.task_instance
dag_id = task_instance.dag_id
run_id = task_instance.run_id
task_id = task_instance.task_id
map_index = task_instance.map_index
try_number = task_instance.try_number
self.triggers[trigger_id] = {
"task": asyncio.create_task(self.run_trigger(trigger_id, trigger_instance)),
"name": f"{dag_id}/{run_id}/{task_id}/{map_index}/{try_number} (ID {trigger_id})",
"events": 0,
}
else:
self.log.warning("Trigger %s had insertion attempted twice", trigger_id)
await asyncio.sleep(0)
async def cancel_triggers(self):
"""
Drain the to_cancel queue and ensure all triggers that are not in the DB are cancelled.
        This allows the cleanup job to delete them.
"""
while self.to_cancel:
trigger_id = self.to_cancel.popleft()
if trigger_id in self.triggers:
# We only delete if it did not exit already
self.triggers[trigger_id]["task"].cancel()
await asyncio.sleep(0)
async def cleanup_finished_triggers(self):
"""
Go through all trigger tasks (coroutines) and clean up entries for ones that have exited.
Optionally warn users if the exit was not normal.
"""
for trigger_id, details in list(self.triggers.items()):
if details["task"].done():
# Check to see if it exited for good reasons
saved_exc = None
try:
result = details["task"].result()
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
# These are "expected" exceptions and we stop processing here
# If we don't, then the system requesting a trigger be removed -
# which turns into CancelledError - results in a failure.
del self.triggers[trigger_id]
continue
except BaseException as e:
# This is potentially bad, so log it.
self.log.exception("Trigger %s exited with error %s", details["name"], e)
saved_exc = e
else:
# See if they foolishly returned a TriggerEvent
if isinstance(result, TriggerEvent):
self.log.error(
"Trigger %s returned a TriggerEvent rather than yielding it", details["name"]
)
# See if this exited without sending an event, in which case
# any task instances depending on it need to be failed
if details["events"] == 0:
self.log.error(
"Trigger %s exited without sending an event. Dependent tasks will be failed.",
details["name"],
)
self.failed_triggers.append((trigger_id, saved_exc))
del self.triggers[trigger_id]
await asyncio.sleep(0)
async def block_watchdog(self):
"""
Watchdog loop that detects blocking (badly-written) triggers.
Triggers should be well-behaved async coroutines and await whenever
they need to wait; this loop tries to run every 100ms to see if
there are badly-written triggers taking longer than that and blocking
the event loop.
Unfortunately, we can't tell what trigger is blocking things, but
we can at least detect the top-level problem.
"""
while not self.stop:
last_run = time.monotonic()
await asyncio.sleep(0.1)
# We allow a generous amount of buffer room for now, since it might
# be a busy event loop.
time_elapsed = time.monotonic() - last_run
if time_elapsed > 0.2:
self.log.error(
"Triggerer's async thread was blocked for %.2f seconds, "
"likely by a badly-written trigger. Set PYTHONASYNCIODEBUG=1 "
"to get more information on overrunning coroutines.",
time_elapsed,
)
Stats.incr("triggers.blocked_main_thread")
@staticmethod
def set_individual_trigger_logging(trigger):
"""Configure trigger logging to allow individual files and stdout filtering."""
# set logging context vars for routing to appropriate handler
ctx_task_instance.set(trigger.task_instance)
ctx_trigger_id.set(trigger.trigger_id)
ctx_trigger_end.set(False)
# mark that we're in the context of an individual trigger so log records can be filtered
ctx_indiv_trigger.set(True)
async def run_trigger(self, trigger_id, trigger):
"""Run a trigger (they are async generators) and push their events into our outbound event deque."""
name = self.triggers[trigger_id]["name"]
self.log.info("trigger %s starting", name)
try:
self.set_individual_trigger_logging(trigger)
async for event in trigger.run():
self.log.info("Trigger %s fired: %s", self.triggers[trigger_id]["name"], event)
self.triggers[trigger_id]["events"] += 1
self.events.append((trigger_id, event))
except asyncio.CancelledError as err:
if timeout := trigger.task_instance.trigger_timeout:
timeout = timeout.replace(tzinfo=timezone.utc) if not timeout.tzinfo else timeout
if timeout < timezone.utcnow():
self.log.error("Trigger cancelled due to timeout")
self.log.error("Trigger cancelled; message=%s", err)
raise
finally:
# CancelledError will get injected when we're stopped - which is
# fine, the cleanup process will understand that, but we want to
# allow triggers a chance to cleanup, either in that case or if
# they exit cleanly. Exception from cleanup methods are ignored.
with suppress(Exception):
await trigger.cleanup()
if SEND_TRIGGER_END_MARKER:
self.mark_trigger_end(trigger)
# unsetting ctx_indiv_trigger var restores stdout logging
ctx_indiv_trigger.set(None)
self.log.info("trigger %s completed", name)
@staticmethod
def mark_trigger_end(trigger):
if not HANDLER_SUPPORTS_TRIGGERER:
return
ctx_trigger_end.set(True)
# this is a special message required by TriggerHandlerWrapper
# it tells the wrapper to close the handler for this trigger
# we set level to 100 so that it will not be filtered by user logging settings
# it is not emitted; see TriggererHandlerWrapper.handle method.
trigger.log.log(level=100, msg="trigger end")
def update_triggers(self, requested_trigger_ids: set[int]):
"""
Called from the main thread to request that we update what triggers we're running.
Works out the differences - ones to add, and ones to remove - then
adds them to the deques so the subthread can actually mutate the running
trigger set.
"""
# Note that `triggers` could be mutated by the other thread during this
# line's execution, but we consider that safe, since there's a strict
# add -> remove -> never again lifecycle this function is already
# handling.
running_trigger_ids = set(self.triggers.keys())
known_trigger_ids = (
running_trigger_ids.union(x[0] for x in self.events)
.union(self.to_cancel)
.union(x[0] for x in self.to_create)
.union(trigger[0] for trigger in self.failed_triggers)
)
# Work out the two difference sets
new_trigger_ids = requested_trigger_ids - known_trigger_ids
cancel_trigger_ids = running_trigger_ids - requested_trigger_ids
# Bulk-fetch new trigger records
new_triggers = Trigger.bulk_fetch(new_trigger_ids)
# Add in new triggers
for new_id in new_trigger_ids:
# Check it didn't vanish in the meantime
if new_id not in new_triggers:
self.log.warning("Trigger ID %s disappeared before we could start it", new_id)
continue
# Resolve trigger record into an actual class instance
try:
new_trigger_orm = new_triggers[new_id]
trigger_class = self.get_trigger_by_classpath(new_trigger_orm.classpath)
except BaseException as e:
# Either the trigger code or the path to it is bad. Fail the trigger.
self.failed_triggers.append((new_id, e))
continue
try:
new_trigger_instance = trigger_class(**new_trigger_orm.kwargs)
except TypeError as err:
self.log.error("Trigger failed; message=%s", err)
self.failed_triggers.append((new_id, err))
continue
self.set_trigger_logging_metadata(new_trigger_orm.task_instance, new_id, new_trigger_instance)
self.to_create.append((new_id, new_trigger_instance))
# Enqueue orphaned triggers for cancellation
for old_id in cancel_trigger_ids:
self.to_cancel.append(old_id)
def set_trigger_logging_metadata(self, ti: TaskInstance, trigger_id, trigger):
"""
Set up logging for triggers.
We want to ensure that each trigger logs to its own file and that the log messages are not
propagated to parent loggers.
:meta private:
"""
if ti: # can be None in tests
ti.is_trigger_log_context = True
trigger.task_instance = ti
trigger.triggerer_job_id = self.job_id
trigger.trigger_id = trigger_id
def get_trigger_by_classpath(self, classpath: str) -> type[BaseTrigger]:
"""
Gets a trigger class by its classpath ("path.to.module.classname").
Uses a cache dictionary to speed up lookups after the first time.
"""
if classpath not in self.trigger_cache:
self.trigger_cache[classpath] = import_string(classpath)
return self.trigger_cache[classpath]
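# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``block_watchdog`` above detects a blocked event loop by sleeping for a short
# interval and measuring how much longer the wake-up actually took. The same
# probe in isolation, with a deliberately badly-written (blocking) coroutine:
def _example_loop_lag_probe() -> float:
    import asyncio
    import time
    async def _main() -> float:
        async def badly_written_trigger() -> None:
            time.sleep(0.3)  # blocks the whole loop; real triggers must await instead
        blocker = asyncio.create_task(badly_written_trigger())
        last_run = time.monotonic()
        await asyncio.sleep(0.1)  # the watchdog's probe sleep
        time_elapsed = time.monotonic() - last_run
        await blocker
        return time_elapsed  # roughly 0.3s instead of 0.1s, i.e. the loop was blocked
    return asyncio.run(_main())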
| 28,596 | 39.334274 | 110 | py |
| airflow | airflow-main/airflow/jobs/__init__.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
| airflow | airflow-main/airflow/jobs/base_job_runner.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Generic, TypeVar
from airflow.utils.session import NEW_SESSION, provide_session
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from airflow.jobs.job import Job
from airflow.serialization.pydantic.job import JobPydantic
J = TypeVar("J", "Job", "JobPydantic", "Job | JobPydantic")
class BaseJobRunner(Generic[J]):
"""Abstract class for job runners to derive from."""
job_type = "undefined"
def __init__(self, job: J) -> None:
if job.job_type and job.job_type != self.job_type:
raise Exception(
f"The job is already assigned a different job_type: {job.job_type}."
f"This is a bug and should be reported."
)
job.job_type = self.job_type
self.job: J = job
def _execute(self) -> int | None:
"""
Executes the logic connected to the runner. This method should be overridden by subclasses.
:meta private:
:return: return code if available, otherwise None
"""
raise NotImplementedError()
@provide_session
def heartbeat_callback(self, session: Session = NEW_SESSION) -> None:
"""Callback that is called during heartbeat. This method can be overwritten by the runners."""
@classmethod
@provide_session
def most_recent_job(cls, session: Session = NEW_SESSION) -> Job | None:
"""Returns the most recent job of this type, if any, based on last heartbeat received."""
from airflow.jobs.job import most_recent_job
return most_recent_job(cls.job_type, session=session)
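# --- Illustrative sketch, not part of the Airflow source ---------------------
# The ``Generic[J]`` parameter above lets each concrete runner pin down the job
# type it accepts while sharing the base behaviour. The same pattern in a
# standalone form, with hypothetical job/runner classes:
def _example_generic_runner() -> str:
    from typing import Generic, TypeVar
    class FakeJob:
        job_type = "fake"
    JobT = TypeVar("JobT", bound=FakeJob)
    class Runner(Generic[JobT]):
        def __init__(self, job: JobT) -> None:
            self.job = job
        def _execute(self) -> str:
            raise NotImplementedError
    class EchoRunner(Runner[FakeJob]):
        def _execute(self) -> str:
            return f"running a {self.job.job_type} job"
    return EchoRunner(FakeJob())._execute()  # "running a fake job"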
| 2,434 | 35.343284 | 102 | py |
| airflow | airflow-main/airflow/jobs/job.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from time import sleep
from typing import Callable, NoReturn
from sqlalchemy import Column, Index, Integer, String, case, select
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import backref, foreign, relationship
from sqlalchemy.orm.session import Session, make_transient
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.listeners.listener import get_listener_manager
from airflow.models.base import ID_LEN, Base
from airflow.serialization.pydantic.job import JobPydantic
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import JobState
def _resolve_dagrun_model():
from airflow.models.dagrun import DagRun
return DagRun
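# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``_resolve_dagrun_model`` above defers the DagRun import until the
# relationship is configured, which avoids a circular import between the job
# and dagrun modules. The same technique in a generic, standalone form (the
# module used here is just a stand-in for one that would import us back):
def _example_deferred_import() -> str:
    def resolve_helper():
        import json  # imported lazily, only when first needed
        return json
    return resolve_helper().dumps({"resolved": "lazily"})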
class Job(Base, LoggingMixin):
"""
The ORM class representing Job stored in the database.
Jobs are processing items with state and duration that aren't task instances.
For instance a BackfillJob is a collection of task instance runs,
but should have its own state, start and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(
String(ID_LEN),
)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__table_args__ = (
Index("job_type_heart", job_type, latest_heartbeat),
Index("idx_job_state_heartbeat", state, latest_heartbeat),
Index("idx_job_dag_id", dag_id),
)
task_instances_enqueued = relationship(
"TaskInstance",
primaryjoin="Job.id == foreign(TaskInstance.queued_by_job_id)",
backref=backref("queued_by_job", uselist=False),
)
dag_runs = relationship(
"DagRun",
primaryjoin=lambda: Job.id == foreign(_resolve_dagrun_model().creating_job_id),
backref="creating_job",
)
"""
TaskInstances which have been enqueued by this Job.
Only makes sense for SchedulerJob and BackfillJob instances.
"""
heartrate = conf.getfloat("scheduler", "JOB_HEARTBEAT_SEC")
def __init__(self, executor=None, heartrate=None, **kwargs):
# Save init parameters as DB fields
self.hostname = get_hostname()
if executor:
self.executor = executor
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
if heartrate is not None:
self.heartrate = heartrate
self.unixname = getuser()
self.max_tis_per_query: int = conf.getint("scheduler", "max_tis_per_query")
get_listener_manager().hook.on_starting(component=self)
super().__init__(**kwargs)
@cached_property
def executor(self):
return ExecutorLoader.get_default_executor()
def is_alive(self, grace_multiplier=2.1):
"""
        Is this job currently alive?
        We define alive as being in the RUNNING state and having sent a heartbeat
        within a multiple of the heartrate (default of 2.1).
:param grace_multiplier: multiplier of heartrate to require heart beat
within
"""
if self.job_type == "SchedulerJob":
health_check_threshold: int = conf.getint("scheduler", "scheduler_health_check_threshold")
else:
health_check_threshold: int = self.heartrate * grace_multiplier
return (
self.state == JobState.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < health_check_threshold
)
@provide_session
def kill(self, session: Session = NEW_SESSION) -> NoReturn:
"""Handles on_kill callback and updates state in database."""
job = session.scalar(select(Job).where(Job.id == self.id).limit(1))
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error("on_kill() method failed: %s", str(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""Will be called when an external kill command is received."""
@provide_session
def heartbeat(
self, heartbeat_callback: Callable[[Session], None], session: Session = NEW_SESSION
) -> None:
"""
Update the job's entry in the database with the latest_heartbeat timestamp.
This allows for the job to be killed externally and allows the system
to monitor what is actually active. For instance, an old heartbeat
for SchedulerJob would mean something is wrong. This also allows for
any job to be killed externally, regardless of who is running it or on
which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param heartbeat_callback: Callback that will be run when the heartbeat is recorded in the Job
        :param session: database session to use for saving the job
"""
previous_heartbeat = self.latest_heartbeat
try:
# This will cause it to load from the db
session.merge(self)
previous_heartbeat = self.latest_heartbeat
if self.state in (JobState.SHUTDOWN, JobState.RESTARTING):
# TODO: Make sure it is AIP-44 compliant
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if self.latest_heartbeat:
seconds_remaining = (
self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
)
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
# Make the session aware of this object
session.merge(self)
self.latest_heartbeat = timezone.utcnow()
session.commit()
# At this point, the DB has updated.
previous_heartbeat = self.latest_heartbeat
heartbeat_callback(session)
self.log.debug("[heartbeat]")
except OperationalError:
Stats.incr(convert_camel_to_snake(self.__class__.__name__) + "_heartbeat_failure", 1, 1)
self.log.exception("%s heartbeat got an exception", self.__class__.__name__)
# We didn't manage to heartbeat, so make sure that the timestamp isn't updated
self.latest_heartbeat = previous_heartbeat
@provide_session
def prepare_for_execution(self, session: Session = NEW_SESSION):
"""Prepares the job for execution."""
Stats.incr(self.__class__.__name__.lower() + "_start", 1, 1)
self.state = JobState.RUNNING
self.start_date = timezone.utcnow()
session.add(self)
session.commit()
make_transient(self)
@provide_session
def complete_execution(self, session: Session = NEW_SESSION):
get_listener_manager().hook.before_stopping(component=self)
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + "_end", 1, 1)
@provide_session
def most_recent_job(self, session: Session = NEW_SESSION) -> Job | None:
"""Returns the most recent job of this type, if any, based on last heartbeat received."""
return most_recent_job(self.job_type, session=session)
@provide_session
def most_recent_job(job_type: str, session: Session = NEW_SESSION) -> Job | None:
"""
Return the most recent job of this type, if any, based on last heartbeat received.
Jobs in "running" state take precedence over others to make sure alive
job is returned if it is available.
:param job_type: job type to query for to get the most recent job for
:param session: Database session
"""
return session.scalar(
select(Job)
.where(Job.job_type == job_type)
.order_by(
# Put "running" jobs at the front.
case({JobState.RUNNING: 0}, value=Job.state, else_=1),
Job.latest_heartbeat.desc(),
)
.limit(1)
)
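# --- Illustrative sketch, not part of the Airflow source ---------------------
# The ``case(...)`` ordering above puts running jobs first and then sorts by
# the most recent heartbeat. The same precedence expressed over plain Python
# dicts (hypothetical rows), to make the intent of the ORDER BY concrete:
def _example_most_recent_job_ordering() -> dict:
    from datetime import datetime, timedelta
    now = datetime(2023, 1, 1, 12, 0, 0)
    rows = [
        {"state": "success", "latest_heartbeat": now},
        {"state": "running", "latest_heartbeat": now - timedelta(minutes=5)},
        {"state": "running", "latest_heartbeat": now - timedelta(minutes=1)},
    ]
    # Running rows sort first; within each group, the freshest heartbeat wins.
    ordered = sorted(
        rows,
        key=lambda r: (0 if r["state"] == "running" else 1, -r["latest_heartbeat"].timestamp()),
    )
    return ordered[0]  # the running job with the most recent heartbeat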
@provide_session
def run_job(
job: Job | JobPydantic, execute_callable: Callable[[], int | None], session: Session = NEW_SESSION
) -> int | None:
"""
Runs the job.
    The Job is always an ORM object; its state is set within the same DB session,
    and the session is kept open throughout the whole execution.
    :meta private:
    TODO: Maybe we should not keep the session open during job execution?
"""
# The below assert is a temporary one, to make MyPy happy with partial AIP-44 work - we will remove it
# once final AIP-44 changes are completed.
assert not isinstance(job, JobPydantic), "Job should be ORM object not Pydantic one here (AIP-44 WIP)"
job.prepare_for_execution(session=session)
try:
return execute_job(job, execute_callable=execute_callable)
finally:
job.complete_execution(session=session)
def execute_job(job: Job | JobPydantic, execute_callable: Callable[[], int | None]) -> int | None:
"""
Executes the job.
    Job execution requires no session, as executing a job generally does not require an
    active database connection. A session might be temporarily acquired and used if the job
    heartbeats during execution, but the connection is only held for the duration of the heartbeat,
    and in the AIP-44 implementation it happens over the Internal API rather than directly via
    the database.
    After the job is completed, the state of the Job is updated in the database,
    which happens in the "complete_execution" step (which, again, can be executed locally in the
    case of database operations, or over the Internal API call).
    :param job: Job to execute - it can be either a DB job or its Pydantic serialized version. It does
    not really matter, because apart from running the heartbeat and setting the state,
    the runner should not otherwise modify the job.
:param execute_callable: callable to execute when running the job.
:meta private:
"""
ret = None
try:
ret = execute_callable()
# In case of max runs or max duration
job.state = JobState.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
job.state = JobState.SUCCESS
except Exception:
job.state = JobState.FAILED
raise
return ret
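# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``execute_job`` above maps the outcome of the callable onto a job state: a
# clean return or a ``SystemExit`` (e.g. ^C / SIGTERM shutdown) counts as
# success, while any other exception marks the job failed and re-raises. The
# same control flow in isolation, with a hypothetical ``work`` callable:
def _example_execute_with_state(work) -> str:
    state = "failed"
    try:
        work()
        state = "success"
    except SystemExit:
        state = "success"  # ^C / SIGTERM still counts as a clean finish
    except Exception:
        state = "failed"  # real code would persist this state before re-raising
        raise
    return state
# _example_execute_with_state(lambda: None) == "success"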
def perform_heartbeat(
job: Job | JobPydantic, heartbeat_callback: Callable[[Session], None], only_if_necessary: bool
) -> None:
"""
    Perform a heartbeat for the Job passed to it, optionally checking if it is necessary.
:param job: job to perform heartbeat for
:param heartbeat_callback: callback to run by the heartbeat
:param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for
triggerer for example)
"""
# The below assert is a temporary one, to make MyPy happy with partial AIP-44 work - we will remove it
# once final AIP-44 changes are completed.
assert not isinstance(job, JobPydantic), "Job should be ORM object not Pydantic one here (AIP-44 WIP)"
seconds_remaining: float = 0.0
if job.latest_heartbeat and job.heartrate:
seconds_remaining = job.heartrate - (timezone.utcnow() - job.latest_heartbeat).total_seconds()
if seconds_remaining > 0 and only_if_necessary:
return
with create_session() as session:
job.heartbeat(heartbeat_callback=heartbeat_callback, session=session)
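# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``perform_heartbeat`` above skips the heartbeat when the previous one is
# recent enough and ``only_if_necessary`` is set. The throttling arithmetic in
# isolation, with hypothetical timestamps supplied by the caller:
def _example_heartbeat_is_due(latest_heartbeat, heartrate_seconds: float, now) -> bool:
    seconds_remaining = heartrate_seconds - (now - latest_heartbeat).total_seconds()
    return seconds_remaining <= 0  # only heartbeat once the heartrate window has elapsed
# from datetime import datetime, timedelta
# _example_heartbeat_is_due(datetime(2023, 1, 1), 5.0, datetime(2023, 1, 1) + timedelta(seconds=2))  # False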
| 13,328 | 37.973684 | 106 | py |
| airflow | airflow-main/airflow/jobs/dag_processor_job_runner.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.dag_processing.manager import DagFileProcessorManager
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import Job, perform_heartbeat
from airflow.utils.log.logging_mixin import LoggingMixin
def empty_callback(_: Any) -> None:
pass
class DagProcessorJobRunner(BaseJobRunner[Job], LoggingMixin):
"""
DagProcessorJobRunner is a job runner that runs a DagFileProcessorManager processor.
:param job: Job instance to use
:param processor: DagFileProcessorManager instance to use
"""
job_type = "DagProcessorJob"
def __init__(
self,
job: Job,
processor: DagFileProcessorManager,
*args,
**kwargs,
):
super().__init__(job)
self.processor = processor
self.processor.heartbeat = lambda: perform_heartbeat(
job=self.job,
heartbeat_callback=empty_callback,
only_if_necessary=True,
)
def _execute(self) -> int | None:
self.log.info("Starting the Dag Processor Job")
try:
self.processor.start()
except Exception:
self.log.exception("Exception when executing DagProcessorJob")
raise
finally:
self.processor.terminate()
self.processor.end()
return None
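# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``DagProcessorJobRunner`` above wires the job heartbeat into the processor by
# replacing ``processor.heartbeat`` with a lambda. The same injection pattern
# in a standalone form, with a hypothetical worker object:
def _example_heartbeat_injection() -> int:
    class Worker:
        def __init__(self) -> None:
            self.heartbeat = lambda: None  # no-op until something is injected
        def run_once(self) -> None:
            self.heartbeat()
    beats: list = []
    worker = Worker()
    worker.heartbeat = lambda: beats.append(1)  # inject the callback
    worker.run_once()
    return len(beats)  # 1: the injected callback ran inside the worker loop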
| 2,179 | 31.058824 | 88 | py |
| airflow | airflow-main/airflow/jobs/local_task_job_runner.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import signal
import psutil
from sqlalchemy.orm import Session
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job_runner import BaseJobRunner
from airflow.jobs.job import Job, perform_heartbeat
from airflow.models.taskinstance import TaskInstance, TaskReturnCode
from airflow.serialization.pydantic.job import JobPydantic
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.file_task_handler import _set_task_deferred_context_var
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import IS_WINDOWS
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import TaskInstanceState
SIGSEGV_MESSAGE = """
******************************************* Received SIGSEGV *******************************************
The SIGSEGV (Segmentation Violation) signal indicates a Segmentation Fault error, which refers to
an attempt by a program/library to write or read outside its allocated memory.
In a Python environment this signal usually comes from libraries which use the low-level C API.
Make sure that you use the right libraries/Docker Images
for your architecture (Intel/ARM) and/or Operating System (Linux/macOS).
Suggested way to debug
======================
- Set environment variable 'PYTHONFAULTHANDLER' to 'true'.
- Start airflow services.
- Restart failed airflow task.
- Check 'scheduler' and 'worker' services logs for additional traceback
which might contain information about module/library where actual error happen.
Known Issues
============
Note: Only Linux-based distros supported as "Production" execution environment for Airflow.
macOS
-----
1. Due to limitations in Apple's libraries, not every process is 'fork'-safe.
One common error is being unable to query the macOS system configuration for network proxies.
If you are not using a proxy you can avoid it by setting the environment variable 'no_proxy' to '*'.
See: https://github.com/python/cpython/issues/58037 and https://bugs.python.org/issue30385#msg293958
********************************************************************************************************"""
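# --- Illustrative sketch, not part of the Airflow source ---------------------
# ``LocalTaskJobRunner._execute`` below installs handlers for SIGTERM and
# SIGSEGV. The registration pattern on its own, with a hypothetical ``cleanup``
# callable (signal handlers must be installed from the main thread):
def _example_install_sigterm_handler(cleanup) -> None:
    import signal
    def handler(signum, frame):
        cleanup()  # e.g. terminate subprocesses before exiting
        raise SystemExit(128 + signum)
    signal.signal(signal.SIGTERM, handler)
# _example_install_sigterm_handler(lambda: print("terminating subprocesses"))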
class LocalTaskJobRunner(BaseJobRunner["Job | JobPydantic"], LoggingMixin):
"""LocalTaskJob runs a single task instance."""
job_type = "LocalTaskJob"
def __init__(
self,
job: Job | JobPydantic,
task_instance: TaskInstance, # TODO add TaskInstancePydantic
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: int | None = None,
pool: str | None = None,
external_executor_id: str | None = None,
):
super().__init__(job)
LoggingMixin.__init__(self, context=task_instance)
self.task_instance = task_instance
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.wait_for_past_depends_before_skipping = wait_for_past_depends_before_skipping
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.external_executor_id = external_executor_id
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
self._state_change_checks = 0
def _execute(self) -> int | None:
from airflow.task.task_runner import get_task_runner
self.task_runner = get_task_runner(self)
def signal_handler(signum, frame):
"""SIGTERM signal handler: terminate the task runner and handle task exit."""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.task_runner.terminate()
self.handle_task_exit(128 + signum)
def segfault_signal_handler(signum, frame):
"""Segmentation violation (SIGSEGV) signal handler."""
self.log.critical(SIGSEGV_MESSAGE)
self.task_runner.terminate()
self.handle_task_exit(128 + signum)
raise AirflowException("Segmentation Fault detected.")
def sigusr2_debug_handler(signum, frame):
import sys
import threading
import traceback
id2name = {th.ident: th.name for th in threading.enumerate()}
for threadId, stack in sys._current_frames().items():
print(id2name[threadId])
traceback.print_stack(f=stack)
signal.signal(signal.SIGSEGV, segfault_signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if not IS_WINDOWS:
# This is not supported on Windows systems
signal.signal(signal.SIGUSR2, sigusr2_debug_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
wait_for_past_depends_before_skipping=self.wait_for_past_depends_before_skipping,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=str(self.job.id),
pool=self.pool,
external_executor_id=self.external_executor_id,
):
self.log.info("Task is not able to be run")
return None
return_code = None
try:
self.task_runner.start()
local_task_job_heartbeat_sec = conf.getint("scheduler", "local_task_job_heartbeat_sec")
if local_task_job_heartbeat_sec < 1:
heartbeat_time_limit = conf.getint("scheduler", "scheduler_zombie_task_threshold")
else:
heartbeat_time_limit = local_task_job_heartbeat_sec
# LocalTaskJob should not run callbacks, which are handled by TaskInstance._run_raw_task
# 1. LocalTaskJob does not parse the DAG, thus it cannot run callbacks
# 2. The run_as_user of LocalTaskJob is likely not the same as that of TaskInstance._run_raw_task.
#    When run_as_user is specified, the process owner of the LocalTaskJob must be sudoable.
# It is not secure to run callbacks with sudoable users.
# If _run_raw_task receives SIGKILL, scheduler will mark it as zombie and invoke callbacks
# If LocalTaskJob receives SIGTERM, LocalTaskJob passes SIGTERM to _run_raw_task
# If the state of task_instance is changed, LocalTaskJob sends SIGTERM to _run_raw_task
while not self.terminating:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
# subprocess finishing as quickly as we can
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.job.latest_heartbeat).total_seconds() * 0.75
),
self.job.heartrate if self.job.heartrate is not None else heartbeat_time_limit,
),
)
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.handle_task_exit(return_code)
return return_code
perform_heartbeat(
job=self.job, heartbeat_callback=self.heartbeat_callback, only_if_necessary=False
)
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.job.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr("local_task_job_prolonged_heartbeat_failure", 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
f"Time since last heartbeat({time_since_last_heartbeat:.2f}s) exceeded limit "
f"({heartbeat_time_limit}s)."
)
return return_code
finally:
self.on_kill()
def handle_task_exit(self, return_code: int) -> None:
"""
Handle case where self.task_runner exits by itself or is externally killed.
Don't run any callbacks.
"""
# Without setting this, the heartbeat callback may act on a task that has already exited
self.terminating = True
self._log_return_code_metric(return_code)
is_deferral = return_code == TaskReturnCode.DEFERRED.value
if is_deferral:
self.log.info("Task exited with return code %s (task deferral)", return_code)
_set_task_deferred_context_var()
else:
self.log.info("Task exited with return code %s", return_code)
if not self.task_instance.test_mode and not is_deferral:
if conf.getboolean("scheduler", "schedule_after_task_execution", fallback=True):
self.task_instance.schedule_downstream_tasks(max_tis_per_query=self.job.max_tis_per_query)
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session: Session = NEW_SESSION) -> None:
"""Self destruct task if state has been moved away from running externally."""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == TaskInstanceState.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.error(
"The recorded hostname %s does not match this instance's hostname %s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = self.task_runner.get_process_pid()
recorded_pid = ti.pid
same_process = recorded_pid == current_pid
if recorded_pid is not None and (ti.run_as_user or self.task_runner.run_as_user):
# when running as another user, compare the task runner pid to the parent of
# the recorded pid because user delegation becomes an extra process level.
# However, if recorded_pid is None, pass that through as it signals the task
# runner process has already completed and been cleared out. `psutil.Process`
# uses the current process if the parameter is None, which is not what is intended
# for comparison.
recorded_pid = psutil.Process(ti.pid).ppid()
same_process = recorded_pid == current_pid
if recorded_pid is not None and not same_process and not IS_WINDOWS:
self.log.warning(
"Recorded pid %s does not match the current pid %s", recorded_pid, current_pid
)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, "process"):
if ti.state == TaskInstanceState.SKIPPED:
# A DagRun timeout will cause tasks to be externally marked as skipped.
dagrun = ti.get_dagrun(session=session)
execution_time = (dagrun.end_date or timezone.utcnow()) - dagrun.start_date
if ti.task.dag is not None:
dagrun_timeout = ti.task.dag.dagrun_timeout
else:
dagrun_timeout = None
if dagrun_timeout and execution_time > dagrun_timeout:
self.log.warning("DagRun timed out after %s.", str(execution_time))
# potential race condition: _run_raw_task commits `success` or another state,
# but the task_runner does not exit right away due to slow process shutdown or other reasons.
# Throttle here; if the above case is true, handle_task_exit will handle it.
if self._state_change_checks >= 1: # defer to next round of heartbeat
self.log.warning(
"State of this instance has been externally set to %s. Terminating instance.", ti.state
)
self.terminating = True
self._state_change_checks += 1
def _log_return_code_metric(self, return_code: int):
Stats.incr(
"local_task_job.task_exit."
f"{self.job.id}.{self.task_instance.dag_id}.{self.task_instance.task_id}.{return_code}"
)
# Same metric with tagging
Stats.incr(
"local_task_job.task_exit",
tags={
"job_id": self.job.id,
"dag_id": self.task_instance.dag_id,
"task_id": self.task_instance.task_id,
"return_code": return_code,
},
)
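# Illustrative sketch (assumption, not part of the original module): a minimal flow for
# running a task instance through LocalTaskJobRunner. The `job` and `ti` objects are
# assumed to come from the ORM; in production the job machinery drives heartbeats and
# session handling around this call, so invoking `_execute` directly is a simplification
# for demonstration only.
def _example_run_local_task(job: Job, ti: TaskInstance) -> int | None:
    runner = LocalTaskJobRunner(job=job, task_instance=ti, ignore_ti_state=False)
    # Returns the task's exit code, or None if the task could not be run.
    return runner._execute()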
| 14,626 | 45.434921 | 107 |
py
|
airflow
|
airflow-main/airflow/utils/db.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import enum
import json
import logging
import os
import sys
import time
import warnings
from dataclasses import dataclass
from tempfile import gettempdir
from typing import TYPE_CHECKING, Callable, Generator, Iterable
from sqlalchemy import Table, and_, column, delete, exc, func, inspect, or_, select, table, text, tuple_
import airflow
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import import_all_models
from airflow.utils import helpers
# TODO: remove create_session once we decide to break backward compatibility
from airflow.utils.session import NEW_SESSION, create_session, provide_session # noqa: F401
if TYPE_CHECKING:
from alembic.runtime.environment import EnvironmentContext
from alembic.script import ScriptDirectory
from sqlalchemy.orm import Query, Session
from airflow.models.base import Base
from airflow.models.connection import Connection
log = logging.getLogger(__name__)
REVISION_HEADS_MAP = {
"2.0.0": "e959f08ac86c",
"2.0.1": "82b7c48c147f",
"2.0.2": "2e42bb497a22",
"2.1.0": "a13f7613ad25",
"2.1.1": "a13f7613ad25",
"2.1.2": "a13f7613ad25",
"2.1.3": "97cdd93827b8",
"2.1.4": "ccde3e26fe78",
"2.2.0": "7b2661a43ba3",
"2.2.1": "7b2661a43ba3",
"2.2.2": "7b2661a43ba3",
"2.2.3": "be2bfac3da23",
"2.2.4": "587bdf053233",
"2.2.5": "587bdf053233",
"2.3.0": "b1b348e02d07",
"2.3.1": "1de7bc13c950",
"2.3.2": "3c94c427fdf6",
"2.3.3": "f5fcbda3e651",
"2.3.4": "f5fcbda3e651",
"2.4.0": "ecb43d2a1842",
"2.4.1": "ecb43d2a1842",
"2.4.2": "b0d31815b5a6",
"2.4.3": "e07f49787c9d",
"2.5.0": "290244fb8b83",
"2.5.1": "290244fb8b83",
"2.5.2": "290244fb8b83",
"2.5.3": "290244fb8b83",
"2.6.0": "98ae134e6fff",
"2.6.1": "98ae134e6fff",
"2.6.2": "c804e5c76e3e",
"2.6.3": "c804e5c76e3e",
}
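# Illustrative sketch (assumption): REVISION_HEADS_MAP can be used to resolve the Alembic
# revision head that corresponds to a given Airflow release, e.g. when pre-generating
# offline migration SQL for a planned upgrade. The "heads" fallback for unknown versions
# is chosen here purely for demonstration.
def _example_revision_for_version(version: str) -> str:
    return REVISION_HEADS_MAP.get(version, "heads")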
def _format_airflow_moved_table_name(source_table, version, category):
return "__".join([settings.AIRFLOW_MOVED_TABLE_PREFIX, version.replace(".", "_"), category, source_table])
@provide_session
def merge_conn(conn: Connection, session: Session = NEW_SESSION):
"""Add new Connection."""
if not session.scalar(select(1).where(conn.__class__.conn_id == conn.conn_id)):
session.add(conn)
session.commit()
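# Illustrative sketch (assumption): merge_conn only inserts a connection when no row with
# the same conn_id already exists, so it is safe to call repeatedly. The conn_id and host
# below are hypothetical examples, not connections shipped with Airflow.
def _example_add_custom_connection() -> None:
    from airflow.models.connection import Connection

    merge_conn(Connection(conn_id="my_postgres_example", conn_type="postgres", host="db.internal"))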
@provide_session
def add_default_pool_if_not_exists(session: Session = NEW_SESSION):
"""Add default pool if it does not exist."""
from airflow.models.pool import Pool
if not Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session):
default_pool = Pool(
pool=Pool.DEFAULT_POOL_NAME,
slots=conf.getint(section="core", key="default_pool_task_slot_count"),
description="Default pool",
)
session.add(default_pool)
session.commit()
@provide_session
def create_default_connections(session: Session = NEW_SESSION):
"""Create default Airflow connections."""
from airflow.models.connection import Connection
merge_conn(
Connection(
conn_id="airflow_db",
conn_type="mysql",
host="mysql",
login="root",
password="",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="aws_default",
conn_type="aws",
),
session,
)
merge_conn(
Connection(
conn_id="azure_batch_default",
conn_type="azure_batch",
login="<ACCOUNT_NAME>",
password="",
extra="""{"account_url": "<ACCOUNT_URL>"}""",
)
)
merge_conn(
Connection(
conn_id="azure_cosmos_default",
conn_type="azure_cosmos",
extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id="azure_data_explorer_default",
conn_type="azure_data_explorer",
host="https://<CLUSTER>.kusto.windows.net",
extra="""{"auth_method": "<AAD_APP | AAD_APP_CERT | AAD_CREDS | AAD_DEVICE>",
"tenant": "<TENANT ID>", "certificate": "<APPLICATION PEM CERTIFICATE>",
"thumbprint": "<APPLICATION CERTIFICATE THUMBPRINT>"}""",
),
session,
)
merge_conn(
Connection(
conn_id="azure_data_lake_default",
conn_type="azure_data_lake",
extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id="azure_default",
conn_type="azure",
),
session,
)
merge_conn(
Connection(
conn_id="cassandra_default",
conn_type="cassandra",
host="cassandra",
port=9042,
),
session,
)
merge_conn(
Connection(
conn_id="databricks_default",
conn_type="databricks",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="dingding_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="drill_default",
conn_type="drill",
host="localhost",
port=8047,
extra='{"dialect_driver": "drill+sadrill", "storage_plugin": "dfs"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_broker_default",
conn_type="druid",
host="druid-broker",
port=8082,
extra='{"endpoint": "druid/v2/sql"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_ingest_default",
conn_type="druid",
host="druid-overlord",
port=8081,
extra='{"endpoint": "druid/indexer/v1/task"}',
),
session,
)
merge_conn(
Connection(
conn_id="elasticsearch_default",
conn_type="elasticsearch",
host="localhost",
schema="http",
port=9200,
),
session,
)
merge_conn(
Connection(
conn_id="emr_default",
conn_type="emr",
extra="""
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"Ec2KeyName": "mykey",
"Ec2SubnetId": "somesubnet",
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Core nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
],
"TerminationProtected": false,
"KeepJobFlowAliveWhenNoSteps": false
},
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="facebook_default",
conn_type="facebook_social",
extra="""
{ "account_id": "<AD_ACCOUNT_ID>",
"app_id": "<FACEBOOK_APP_ID>",
"app_secret": "<FACEBOOK_APP_SECRET>",
"access_token": "<FACEBOOK_AD_ACCESS_TOKEN>"
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="fs_default",
conn_type="fs",
extra='{"path": "/"}',
),
session,
)
merge_conn(
Connection(
conn_id="ftp_default",
conn_type="ftp",
host="localhost",
port=21,
login="airflow",
password="airflow",
extra='{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}',
),
session,
)
merge_conn(
Connection(
conn_id="google_cloud_default",
conn_type="google_cloud_platform",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hive_cli_default",
conn_type="hive_cli",
port=10000,
host="localhost",
extra='{"use_beeline": true, "auth": ""}',
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hiveserver2_default",
conn_type="hiveserver2",
host="localhost",
schema="default",
port=10000,
),
session,
)
merge_conn(
Connection(
conn_id="http_default",
conn_type="http",
host="https://www.httpbin.org/",
),
session,
)
merge_conn(Connection(conn_id="impala_default", conn_type="impala", host="localhost", port=21050))
merge_conn(
Connection(
conn_id="kafka_default",
conn_type="kafka",
extra=json.dumps({"bootstrap.servers": "broker:29092"}),
),
session,
)
merge_conn(
Connection(
conn_id="kubernetes_default",
conn_type="kubernetes",
),
session,
)
merge_conn(
Connection(
conn_id="kylin_default",
conn_type="kylin",
host="localhost",
port=7070,
login="ADMIN",
password="KYLIN",
),
session,
)
merge_conn(
Connection(
conn_id="leveldb_default",
conn_type="leveldb",
host="localhost",
),
session,
)
merge_conn(Connection(conn_id="livy_default", conn_type="livy", host="livy", port=8998), session)
merge_conn(
Connection(
conn_id="local_mysql",
conn_type="mysql",
host="localhost",
login="airflow",
password="airflow",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="metastore_default",
conn_type="hive_metastore",
host="localhost",
extra='{"authMechanism": "PLAIN"}',
port=9083,
),
session,
)
merge_conn(Connection(conn_id="mongo_default", conn_type="mongo", host="mongo", port=27017), session)
merge_conn(
Connection(
conn_id="mssql_default",
conn_type="mssql",
host="localhost",
port=1433,
),
session,
)
merge_conn(
Connection(
conn_id="mysql_default",
conn_type="mysql",
login="root",
schema="airflow",
host="mysql",
),
session,
)
merge_conn(
Connection(
conn_id="opsgenie_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="oracle_default",
conn_type="oracle",
host="localhost",
login="root",
password="password",
schema="schema",
port=1521,
),
session,
)
merge_conn(
Connection(
conn_id="oss_default",
conn_type="oss",
extra="""{
"auth_type": "AK",
"access_key_id": "<ACCESS_KEY_ID>",
"access_key_secret": "<ACCESS_KEY_SECRET>",
"region": "<YOUR_OSS_REGION>"}
""",
),
session,
)
merge_conn(
Connection(
conn_id="pig_cli_default",
conn_type="pig_cli",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="pinot_admin_default",
conn_type="pinot",
host="localhost",
port=9000,
),
session,
)
merge_conn(
Connection(
conn_id="pinot_broker_default",
conn_type="pinot",
host="localhost",
port=9000,
extra='{"endpoint": "/query", "schema": "http"}',
),
session,
)
merge_conn(
Connection(
conn_id="postgres_default",
conn_type="postgres",
login="postgres",
password="airflow",
schema="airflow",
host="postgres",
),
session,
)
merge_conn(
Connection(
conn_id="presto_default",
conn_type="presto",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="qubole_default",
conn_type="qubole",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="redis_default",
conn_type="redis",
host="redis",
port=6379,
extra='{"db": 0}',
),
session,
)
merge_conn(
Connection(
conn_id="redshift_default",
conn_type="redshift",
extra="""{
"iam": true,
"cluster_identifier": "<REDSHIFT_CLUSTER_IDENTIFIER>",
"port": 5439,
"profile": "default",
"db_user": "awsuser",
"database": "dev",
"region": ""
}""",
),
session,
)
merge_conn(
Connection(
conn_id="salesforce_default",
conn_type="salesforce",
login="username",
password="password",
extra='{"security_token": "security_token"}',
),
session,
)
merge_conn(
Connection(
conn_id="segment_default",
conn_type="segment",
extra='{"write_key": "my-segment-write-key"}',
),
session,
)
merge_conn(
Connection(
conn_id="sftp_default",
conn_type="sftp",
host="localhost",
port=22,
login="airflow",
extra='{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}',
),
session,
)
merge_conn(
Connection(
conn_id="spark_default",
conn_type="spark",
host="yarn",
extra='{"queue": "root.default"}',
),
session,
)
merge_conn(
Connection(
conn_id="sqlite_default",
conn_type="sqlite",
host=os.path.join(gettempdir(), "sqlite_default.db"),
),
session,
)
merge_conn(
Connection(
conn_id="sqoop_default",
conn_type="sqoop",
host="rdbms",
),
session,
)
merge_conn(
Connection(
conn_id="ssh_default",
conn_type="ssh",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="tableau_default",
conn_type="tableau",
host="https://tableau.server.url",
login="user",
password="password",
extra='{"site_id": "my_site"}',
),
session,
)
merge_conn(
Connection(
conn_id="tabular_default",
conn_type="tabular",
host="https://api.tabulardata.io/ws/v1",
),
session,
)
merge_conn(
Connection(
conn_id="trino_default",
conn_type="trino",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="vertica_default",
conn_type="vertica",
host="localhost",
port=5433,
),
session,
)
merge_conn(
Connection(
conn_id="wasb_default",
conn_type="wasb",
extra='{"sas_token": null}',
),
session,
)
merge_conn(
Connection(
conn_id="webhdfs_default",
conn_type="hdfs",
host="localhost",
port=50070,
),
session,
)
merge_conn(
Connection(
conn_id="yandexcloud_default",
conn_type="yandexcloud",
schema="default",
),
session,
)
def _get_flask_db(sql_database_uri):
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from airflow.www.session import AirflowDatabaseSessionInterface
flask_app = Flask(__name__)
flask_app.config["SQLALCHEMY_DATABASE_URI"] = sql_database_uri
flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(flask_app)
AirflowDatabaseSessionInterface(app=flask_app, db=db, table="session", key_prefix="")
return db
def _create_db_from_orm(session):
from alembic import command
from airflow.models.base import Base
from airflow.www.fab_security.sqla.models import Model
def _create_flask_session_tbl(sql_database_uri):
db = _get_flask_db(sql_database_uri)
db.create_all()
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
engine = session.get_bind().engine
Base.metadata.create_all(engine)
Model.metadata.create_all(engine)
_create_flask_session_tbl(engine.url)
# stamp the migration head
config = _get_alembic_config()
command.stamp(config, "head")
@provide_session
def initdb(session: Session = NEW_SESSION, load_connections: bool = True):
"""Initialize Airflow database."""
import_all_models()
db_exists = _get_current_revision(session)
if db_exists:
upgradedb(session=session)
else:
_create_db_from_orm(session=session)
# Load default connections
if conf.getboolean("database", "LOAD_DEFAULT_CONNECTIONS") and load_connections:
create_default_connections(session=session)
# Add default pool & sync log_template
add_default_pool_if_not_exists(session=session)
synchronize_log_template(session=session)
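# Illustrative sketch (assumption): initdb() is effectively idempotent -- it upgrades an
# existing schema in place and only creates the tables from the ORM when the database is
# empty. Skipping the default connections, as done here, is useful for test or CI setups.
def _example_bootstrap_metadata_db() -> None:
    initdb(load_connections=False)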
def _get_alembic_config():
from alembic.config import Config
package_dir = os.path.dirname(airflow.__file__)
directory = os.path.join(package_dir, "migrations")
alembic_file = conf.get("database", "alembic_ini_file_path")
if os.path.isabs(alembic_file):
config = Config(alembic_file)
else:
config = Config(os.path.join(package_dir, alembic_file))
config.set_main_option("script_location", directory.replace("%", "%%"))
config.set_main_option("sqlalchemy.url", settings.SQL_ALCHEMY_CONN.replace("%", "%%"))
return config
def _get_script_object(config=None) -> ScriptDirectory:
from alembic.script import ScriptDirectory
if not config:
config = _get_alembic_config()
return ScriptDirectory.from_config(config)
def _get_current_revision(session):
from alembic.migration import MigrationContext
conn = session.connection()
migration_ctx = MigrationContext.configure(conn)
return migration_ctx.get_current_revision()
def check_migrations(timeout):
"""
Function to wait for all airflow migrations to complete.
:param timeout: Timeout for the migration in seconds
:return: None
"""
timeout = timeout or 1 # run the loop at least once
with _configured_alembic_environment() as env:
context = env.get_context()
source_heads = None
db_heads = None
for ticker in range(timeout):
source_heads = set(env.script.get_heads())
db_heads = set(context.get_current_heads())
if source_heads == db_heads:
return
time.sleep(1)
log.info("Waiting for migrations... %s second(s)", ticker)
raise TimeoutError(
f"There are still unapplied migrations after {timeout} seconds. Migration "
f"Head(s) in DB: {db_heads} | Migration Head(s) in Source Code: {source_heads}"
)
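# Illustrative sketch (assumption): a deployment wrapper might poll for migrations with a
# bounded wait before starting other components, treating a timeout as a fatal condition.
# The helper name and the 60-second default are hypothetical.
def _example_wait_for_migrations(max_wait_seconds: int = 60) -> bool:
    try:
        check_migrations(timeout=max_wait_seconds)
        return True
    except TimeoutError:
        log.error("Metadata database migrations did not complete within %s seconds", max_wait_seconds)
        return False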
@contextlib.contextmanager
def _configured_alembic_environment() -> Generator[EnvironmentContext, None, None]:
from alembic.runtime.environment import EnvironmentContext
config = _get_alembic_config()
script = _get_script_object(config)
with EnvironmentContext(
config,
script,
) as env, settings.engine.connect() as connection:
alembic_logger = logging.getLogger("alembic")
level = alembic_logger.level
alembic_logger.setLevel(logging.WARNING)
env.configure(connection)
alembic_logger.setLevel(level)
yield env
def check_and_run_migrations():
"""Check and run migrations if necessary. Only use in a tty."""
with _configured_alembic_environment() as env:
context = env.get_context()
source_heads = set(env.script.get_heads())
db_heads = set(context.get_current_heads())
db_command = None
command_name = None
verb = None
if len(db_heads) < 1:
db_command = initdb
command_name = "init"
verb = "initialize"
elif source_heads != db_heads:
db_command = upgradedb
command_name = "upgrade"
verb = "upgrade"
if sys.stdout.isatty() and verb:
print()
question = f"Please confirm database {verb} (or wait 4 seconds to skip it). Are you sure? [y/N]"
try:
answer = helpers.prompt_with_timeout(question, timeout=4, default=False)
if answer:
try:
db_command()
print(f"DB {verb} done")
except Exception as error:
from airflow.version import version
print(error)
print(
"You still have unapplied migrations. "
f"You may need to {verb} the database by running `airflow db {command_name}`. ",
f"Make sure the command is run using Airflow version {version}.",
file=sys.stderr,
)
sys.exit(1)
except AirflowException:
pass
elif source_heads != db_heads:
from airflow.version import version
print(
f"ERROR: You need to {verb} the database. Please run `airflow db {command_name}`. "
f"Make sure the command is run using Airflow version {version}.",
file=sys.stderr,
)
sys.exit(1)
def _reserialize_dags(*, session: Session) -> None:
from airflow.models.dagbag import DagBag
from airflow.models.serialized_dag import SerializedDagModel
session.execute(delete(SerializedDagModel).execution_options(synchronize_session=False))
dagbag = DagBag(collect_dags=False)
dagbag.collect_dags(only_if_updated=False)
dagbag.sync_to_db(session=session)
@provide_session
def synchronize_log_template(*, session: Session = NEW_SESSION) -> None:
"""Synchronize log template configs with the table.
This checks if the last row fully matches the current config values, and
inserts a new row if not.
"""
# NOTE: SELECT queries in this function are INTENTIONALLY written with the
# SQL builder style, not the ORM query API. This avoids configuring the ORM
# unless we need to insert something, speeding up CLI in general.
from airflow.models.tasklog import LogTemplate
metadata = reflect_tables([LogTemplate], session)
log_template_table: Table | None = metadata.tables.get(LogTemplate.__tablename__)
if log_template_table is None:
log.info("Log template table does not exist (added in 2.3.0); skipping log template sync.")
return
filename = conf.get("logging", "log_filename_template")
elasticsearch_id = conf.get("elasticsearch", "log_id_template")
stored = session.execute(
select(
log_template_table.c.filename,
log_template_table.c.elasticsearch_id,
).order_by(log_template_table.c.id.desc()),
).first()
# If we have an empty table, and the default values exist, we will seed the
# table with values from pre 2.3.0, so old logs will still be retrievable.
if not stored:
is_default_log_id = elasticsearch_id == conf.airflow_defaults.get("elasticsearch", "log_id_template")
is_default_filename = filename == conf.airflow_defaults.get("logging", "log_filename_template")
if is_default_log_id and is_default_filename:
session.add(
LogTemplate(
filename="{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log",
elasticsearch_id="{dag_id}-{task_id}-{execution_date}-{try_number}",
)
)
# Before checking if the _current_ value exists, we need to check if the old config value we upgraded in
# place exists!
pre_upgrade_filename = conf.upgraded_values.get(("logging", "log_filename_template"), filename)
pre_upgrade_elasticsearch_id = conf.upgraded_values.get(
("elasticsearch", "log_id_template"), elasticsearch_id
)
if pre_upgrade_filename != filename or pre_upgrade_elasticsearch_id != elasticsearch_id:
# The previous non-upgraded value likely won't be the _latest_ value (as after we've
# recorded the upgraded value it will be second-to-newest), so we'll have to just search, which is okay
# as this is a table with a tiny number of rows
row = session.execute(
select(log_template_table.c.id)
.where(
or_(
log_template_table.c.filename == pre_upgrade_filename,
log_template_table.c.elasticsearch_id == pre_upgrade_elasticsearch_id,
)
)
.order_by(log_template_table.c.id.desc())
).first()
if not row:
session.add(
LogTemplate(filename=pre_upgrade_filename, elasticsearch_id=pre_upgrade_elasticsearch_id)
)
if not stored or stored.filename != filename or stored.elasticsearch_id != elasticsearch_id:
session.add(LogTemplate(filename=filename, elasticsearch_id=elasticsearch_id))
def check_conn_id_duplicates(session: Session) -> Iterable[str]:
"""
Check that conn_id is unique in the connection table.
:param session: SQLAlchemy session
"""
from airflow.models.connection import Connection
try:
dups = session.scalars(
select(Connection.conn_id).group_by(Connection.conn_id).having(func.count() > 1)
).all()
except (exc.OperationalError, exc.ProgrammingError):
# fallback if the table hasn't been created yet
session.rollback()
return
if dups:
yield (
"Seems you have non-unique conn_id values in the connection table.\n"
"You have to manage those duplicate connections "
"before upgrading the database.\n"
f"Duplicated conn_id: {dups}"
)
def check_username_duplicates(session: Session) -> Iterable[str]:
"""
Check that usernames are unique in the User & RegisterUser tables.
:param session: SQLAlchemy session
"""
from airflow.www.fab_security.sqla.models import RegisterUser, User
for model in [User, RegisterUser]:
dups = []
try:
dups = session.execute(
select(model.username) # type: ignore[attr-defined]
.group_by(model.username) # type: ignore[attr-defined]
.having(func.count() > 1)
).all()
except (exc.OperationalError, exc.ProgrammingError):
# fallback if the table hasn't been created yet
session.rollback()
if dups:
yield (
f"Seems you have mixed case usernames in {model.__table__.name} table.\n" # type: ignore
"You have to rename or delete those mixed case usernames "
"before upgrading the database.\n"
f"usernames with mixed cases: {[dup.username for dup in dups]}"
)
def reflect_tables(tables: list[Base | str] | None, session):
"""
When running checks prior to upgrades, we use reflection to determine the current state of the database.
This function gets the current state of each table in the set of models
provided and returns a SqlAlchemy metadata object containing them.
"""
import sqlalchemy.schema
bind = session.bind
metadata = sqlalchemy.schema.MetaData()
if tables is None:
metadata.reflect(bind=bind, resolve_fks=False)
else:
for tbl in tables:
try:
table_name = tbl if isinstance(tbl, str) else tbl.__tablename__
metadata.reflect(bind=bind, only=[table_name], extend_existing=True, resolve_fks=False)
except exc.InvalidRequestError:
continue
return metadata
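# Illustrative sketch (assumption): reflecting a table by name rather than by model, which
# is how the pre-migration checks below inspect the *actual* database state instead of the
# ORM definition. "connection" is a real Airflow table name used here only as an example.
def _example_inspect_connection_table() -> None:
    with create_session() as session:
        metadata = reflect_tables(["connection"], session)
        tbl = metadata.tables.get("connection")
        if tbl is not None:
            log.info("connection table columns: %s", [c.name for c in tbl.columns])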
def check_table_for_duplicates(
*, session: Session, table_name: str, uniqueness: list[str], version: str
) -> Iterable[str]:
"""
Check table for duplicates, given a list of columns which define the uniqueness of the table.
Usage example:
.. code-block:: python
def check_task_fail_for_duplicates(session):
from airflow.models.taskfail import TaskFail
metadata = reflect_tables([TaskFail], session)
task_fail = metadata.tables.get(TaskFail.__tablename__) # type: ignore
if task_fail is None: # table not there
return
if "run_id" in task_fail.columns: # upgrade already applied
return
yield from check_table_for_duplicates(
table_name=task_fail.name,
uniqueness=["dag_id", "task_id", "execution_date"],
session=session,
version="2.3",
)
:param table_name: table name to check
:param uniqueness: uniqueness constraint to evaluate against
:param session: SQLAlchemy session
"""
minimal_table_obj = table(table_name, *(column(x) for x in uniqueness))
try:
subquery = session.execute(
select(minimal_table_obj, func.count().label("dupe_count"))
.group_by(*(text(x) for x in uniqueness))
.having(func.count() > text("1"))
.subquery()
)
dupe_count = session.scalar(select(func.sum(subquery.c.dupe_count)))
if not dupe_count:
# there are no duplicates; nothing to do.
return
log.warning("Found %s duplicates in table %s. Will attempt to move them.", dupe_count, table_name)
metadata = reflect_tables(tables=[table_name], session=session)
if table_name not in metadata.tables:
yield f"Table {table_name} does not exist in the database."
# We can't use the model here since it may differ from the db state, because
# this function is run prior to migration. Use the reflected table instead.
table_obj = metadata.tables[table_name]
_move_duplicate_data_to_new_table(
session=session,
source_table=table_obj,
subquery=subquery,
uniqueness=uniqueness,
target_table_name=_format_airflow_moved_table_name(table_name, version, "duplicates"),
)
except (exc.OperationalError, exc.ProgrammingError):
# fallback if `table_name` hasn't been created yet
session.rollback()
def check_conn_type_null(session: Session) -> Iterable[str]:
"""
Check for NULL values in the conn_type column of the connection table.
:param session: SQLAlchemy session
"""
from airflow.models.connection import Connection
try:
n_nulls = session.scalars(select(Connection.conn_id).where(Connection.conn_type.is_(None))).all()
except (exc.OperationalError, exc.ProgrammingError, exc.InternalError):
# fallback if the table hasn't been created yet
session.rollback()
return
if n_nulls:
yield (
"The conn_type column in the connection "
"table must contain content.\n"
"Make sure you don't have null "
"in the conn_type column.\n"
f"Null conn_type conn_id: {n_nulls}"
)
def _format_dangling_error(source_table, target_table, invalid_count, reason):
noun = "row" if invalid_count == 1 else "rows"
return (
f"The {source_table} table has {invalid_count} {noun} {reason}, which "
f"is invalid. We could not move them out of the way because the "
f"{target_table} table already exists in your database. Please either "
f"drop the {target_table} table, or manually delete the invalid rows "
f"from the {source_table} table."
)
def check_run_id_null(session: Session) -> Iterable[str]:
from airflow.models.dagrun import DagRun
metadata = reflect_tables([DagRun], session)
# We can't use the model here since it may differ from the db state, because
# this function is run prior to migration. Use the reflected table instead.
dagrun_table = metadata.tables.get(DagRun.__tablename__)
if dagrun_table is None:
return
invalid_dagrun_filter = or_(
dagrun_table.c.dag_id.is_(None),
dagrun_table.c.run_id.is_(None),
dagrun_table.c.execution_date.is_(None),
)
invalid_dagrun_count = session.scalar(select(func.count(dagrun_table.c.id)).where(invalid_dagrun_filter))
if invalid_dagrun_count > 0:
dagrun_dangling_table_name = _format_airflow_moved_table_name(dagrun_table.name, "2.2", "dangling")
if dagrun_dangling_table_name in inspect(session.get_bind()).get_table_names():
yield _format_dangling_error(
source_table=dagrun_table.name,
target_table=dagrun_dangling_table_name,
invalid_count=invalid_dagrun_count,
reason="with a NULL dag_id, run_id, or execution_date",
)
return
bind = session.get_bind()
dialect_name = bind.dialect.name
_create_table_as(
dialect_name=dialect_name,
source_query=dagrun_table.select(invalid_dagrun_filter),
target_table_name=dagrun_dangling_table_name,
source_table_name=dagrun_table.name,
session=session,
)
delete = dagrun_table.delete().where(invalid_dagrun_filter)
session.execute(delete)
def _create_table_as(
*,
session,
dialect_name: str,
source_query: Query,
target_table_name: str,
source_table_name: str,
):
"""
Create a new table with rows from query.
We have to handle CTAS differently for different dialects.
"""
from sqlalchemy import column, select, table
if dialect_name == "mssql":
cte = source_query.cte("source")
moved_data_tbl = table(target_table_name, *(column(c.name) for c in cte.columns))
ins = moved_data_tbl.insert().from_select(list(cte.columns), select(cte))
stmt = ins.compile(bind=session.get_bind())
cte_sql = stmt.ctes[cte]
session.execute(text(f"WITH {cte_sql} SELECT source.* INTO {target_table_name} FROM source"))
elif dialect_name == "mysql":
# MySQL with replication needs this split into two queries, so just do it for all MySQL
# ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT.
session.execute(text(f"CREATE TABLE {target_table_name} LIKE {source_table_name}"))
session.execute(
text(
f"INSERT INTO {target_table_name} {source_query.selectable.compile(bind=session.get_bind())}"
)
)
else:
# Postgres and SQLite both support the same "CREATE TABLE a AS SELECT ..." syntax
session.execute(
f"CREATE TABLE {target_table_name} AS {source_query.selectable.compile(bind=session.get_bind())}"
)
def _move_dangling_data_to_new_table(
session, source_table: Table, source_query: Query, target_table_name: str
):
bind = session.get_bind()
dialect_name = bind.dialect.name
# First: Create moved rows from new table
log.debug("running CTAS for table %s", target_table_name)
_create_table_as(
dialect_name=dialect_name,
source_query=source_query,
target_table_name=target_table_name,
source_table_name=source_table.name,
session=session,
)
session.commit()
target_table = source_table.to_metadata(source_table.metadata, name=target_table_name)
log.debug("checking whether rows were moved for table %s", target_table_name)
moved_rows_exist_query = select(1).select_from(target_table).limit(1)
first_moved_row = session.execute(moved_rows_exist_query).all()
session.commit()
if not first_moved_row:
log.debug("no rows moved; dropping %s", target_table_name)
# no bad rows were found; drop moved rows table.
target_table.drop(bind=session.get_bind(), checkfirst=True)
else:
log.debug("rows moved; purging from %s", source_table.name)
if dialect_name == "sqlite":
pk_cols = source_table.primary_key.columns
delete = source_table.delete().where(
tuple_(*pk_cols).in_(select(*target_table.primary_key.columns).subquery())
)
else:
delete = source_table.delete().where(
and_(col == target_table.c[col.name] for col in source_table.primary_key.columns)
)
log.debug(delete.compile())
session.execute(delete)
session.commit()
log.debug("exiting move function")
def _dangling_against_dag_run(session, source_table, dag_run):
"""Given a source table, generate a query returning every row that has no corresponding dag_run row (i.e. dangling rows)."""
source_to_dag_run_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
return (
select(*(c.label(c.name) for c in source_table.c))
.join(dag_run, source_to_dag_run_join_cond, isouter=True)
.where(dag_run.c.dag_id.is_(None))
)
def _dangling_against_task_instance(session, source_table, dag_run, task_instance):
"""
Given a source table, generate a query returning every row that lacks a corresponding task_instance (or dag_run) row.
This is used to identify rows that need to be removed from tables prior to adding a TI fk.
Since this check is applied prior to running the migrations, we have to use different
query logic depending on which revision the database is at.
"""
if "run_id" not in task_instance.c:
# db is < 2.2.0
dr_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
ti_join_cond = and_(
dag_run.c.dag_id == task_instance.c.dag_id,
dag_run.c.execution_date == task_instance.c.execution_date,
source_table.c.task_id == task_instance.c.task_id,
)
else:
# db is 2.2.0 <= version < 2.3.0
dr_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
ti_join_cond = and_(
dag_run.c.dag_id == task_instance.c.dag_id,
dag_run.c.run_id == task_instance.c.run_id,
source_table.c.task_id == task_instance.c.task_id,
)
return (
select(*(c.label(c.name) for c in source_table.c))
.outerjoin(dag_run, dr_join_cond)
.outerjoin(task_instance, ti_join_cond)
.where(or_(task_instance.c.dag_id.is_(None), dag_run.c.dag_id.is_(None)))
)
def _move_duplicate_data_to_new_table(
session, source_table: Table, subquery: Query, uniqueness: list[str], target_table_name: str
):
"""
When adding a uniqueness constraint, we should first ensure that there are no duplicate rows.
This function accepts a subquery that should return one record for each row with duplicates (e.g.
a group by with having count(*) > 1). We select from ``source_table`` all rows matching the
subquery result and store them in ``target_table_name``. Then, to purge the duplicates from the source
table, we do a DELETE FROM with a join to the target table (which now contains the dupes).
:param session: sqlalchemy session for metadata db
:param source_table: table to purge dupes from
:param subquery: the subquery that returns the duplicate rows
:param uniqueness: the string list of columns used to define the uniqueness for the table. used in
building the DELETE FROM join condition.
:param target_table_name: name of the table in which to park the duplicate rows
"""
bind = session.get_bind()
dialect_name = bind.dialect.name
query = (
select(*(source_table.c[x.name].label(str(x.name)) for x in source_table.columns))
.select_from(source_table)
.join(subquery, and_(*(source_table.c[x] == subquery.c[x] for x in uniqueness)))
)
_create_table_as(
session=session,
dialect_name=dialect_name,
source_query=query,
target_table_name=target_table_name,
source_table_name=source_table.name,
)
# we must ensure that the CTAS table is created prior to the DELETE step since we have to join to it
session.commit()
metadata = reflect_tables([target_table_name], session)
target_table = metadata.tables[target_table_name]
where_clause = and_(*(source_table.c[x] == target_table.c[x] for x in uniqueness))
if dialect_name == "sqlite":
subq = query.selectable.with_only_columns([text(f"{source_table}.ROWID")])
delete = source_table.delete().where(column("ROWID").in_(subq))
else:
delete = source_table.delete(where_clause)
session.execute(delete)
def check_bad_references(session: Session) -> Iterable[str]:
"""
Starting in Airflow 2.2, we began a process of replacing `execution_date` with `run_id` in many tables.
Here we go through each table and look for records that can't be mapped to a dag run.
When we find such "dangling" rows we back them up in a special table and delete them
from the main table.
"""
from airflow.models.dagrun import DagRun
from airflow.models.renderedtifields import RenderedTaskInstanceFields
from airflow.models.taskfail import TaskFail
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.xcom import XCom
@dataclass
class BadReferenceConfig:
"""
Bad reference config class.
:param bad_rows_func: function that returns subquery which determines whether bad rows exist
:param join_tables: table objects referenced in subquery
:param ref_table: information-only identifier for categorizing the missing ref
"""
bad_rows_func: Callable
join_tables: list[str]
ref_table: str
missing_dag_run_config = BadReferenceConfig(
bad_rows_func=_dangling_against_dag_run,
join_tables=["dag_run"],
ref_table="dag_run",
)
missing_ti_config = BadReferenceConfig(
bad_rows_func=_dangling_against_task_instance,
join_tables=["dag_run", "task_instance"],
ref_table="task_instance",
)
models_list: list[tuple[Base, str, BadReferenceConfig]] = [
(TaskInstance, "2.2", missing_dag_run_config),
(TaskReschedule, "2.2", missing_ti_config),
(RenderedTaskInstanceFields, "2.3", missing_ti_config),
(TaskFail, "2.3", missing_ti_config),
(XCom, "2.3", missing_ti_config),
]
metadata = reflect_tables([*(x[0] for x in models_list), DagRun, TaskInstance], session)
if (
not metadata.tables
or metadata.tables.get(DagRun.__tablename__) is None
or metadata.tables.get(TaskInstance.__tablename__) is None
):
# Key table doesn't exist -- likely empty DB.
return
existing_table_names = set(inspect(session.get_bind()).get_table_names())
errored = False
for model, change_version, bad_ref_cfg in models_list:
log.debug("checking model %s", model.__tablename__)
# We can't use the model here since it may differ from the db state, because
# this function is run prior to migration. Use the reflected table instead.
source_table = metadata.tables.get(model.__tablename__) # type: ignore
if source_table is None:
continue
# Migration already applied, don't check again.
if "run_id" in source_table.columns:
continue
func_kwargs = {x: metadata.tables[x] for x in bad_ref_cfg.join_tables}
bad_rows_query = bad_ref_cfg.bad_rows_func(session, source_table, **func_kwargs)
dangling_table_name = _format_airflow_moved_table_name(source_table.name, change_version, "dangling")
if dangling_table_name in existing_table_names:
invalid_row_count = bad_rows_query.count()
if invalid_row_count <= 0:
continue
else:
yield _format_dangling_error(
source_table=source_table.name,
target_table=dangling_table_name,
invalid_count=invalid_row_count,
reason=f"without a corresponding {bad_ref_cfg.ref_table} row",
)
errored = True
continue
log.debug("moving data for table %s", source_table.name)
_move_dangling_data_to_new_table(
session,
source_table,
bad_rows_query,
dangling_table_name,
)
if errored:
session.rollback()
else:
session.commit()
@provide_session
def _check_migration_errors(session: Session = NEW_SESSION) -> Iterable[str]:
"""Run the pre-migration check functions and yield any error messages. :param session: SQLAlchemy session."""
check_functions: tuple[Callable[..., Iterable[str]], ...] = (
check_conn_id_duplicates,
check_conn_type_null,
check_run_id_null,
check_bad_references,
check_username_duplicates,
)
for check_fn in check_functions:
log.debug("running check function %s", check_fn.__name__)
yield from check_fn(session=session)
# Ensure there is no "active" transaction. Seems odd, but without this MSSQL can hang
session.commit()
def _offline_migration(migration_func: Callable, config, revision):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.disable(logging.CRITICAL)
migration_func(config, revision, sql=True)
logging.disable(logging.NOTSET)
def print_happy_cat(message):
if sys.stdout.isatty():
size = os.get_terminal_size().columns
else:
size = 0
print(message.center(size))
print("""/\\_/\\""".center(size))
print("""(='_' )""".center(size))
print("""(,(") (")""".center(size))
print("""^^^""".center(size))
return
def _revision_greater(config, this_rev, base_rev):
# Check if there is history between the revisions and the start revision
# This ensures that the revisions are above `min_revision`
script = _get_script_object(config)
try:
list(script.revision_map.iterate_revisions(upper=this_rev, lower=base_rev))
return True
except Exception:
return False
def _revisions_above_min_for_offline(config, revisions) -> None:
"""
Checks that all supplied revision ids are above the minimum revision for the dialect.
:param config: Alembic config
:param revisions: list of Alembic revision ids
:return: None
"""
dbname = settings.engine.dialect.name
if dbname == "sqlite":
raise AirflowException("Offline migration not supported for SQLite.")
min_version, min_revision = ("2.2.0", "7b2661a43ba3") if dbname == "mssql" else ("2.0.0", "e959f08ac86c")
# Check if there is history between the revisions and the start revision
# This ensures that the revisions are above `min_revision`
for rev in revisions:
if not _revision_greater(config, rev, min_revision):
raise ValueError(
f"Error while checking history for revision range {min_revision}:{rev}. "
f"Check that {rev} is a valid revision. "
f"For dialect {dbname!r}, supported revision for offline migration is from {min_revision} "
f"which corresponds to Airflow {min_version}."
)
@provide_session
def upgradedb(
*,
to_revision: str | None = None,
from_revision: str | None = None,
show_sql_only: bool = False,
reserialize_dags: bool = True,
session: Session = NEW_SESSION,
):
"""
Upgrades the DB.
:param to_revision: Optional Alembic revision ID to upgrade *to*.
If omitted, upgrades to latest revision.
:param from_revision: Optional Alembic revision ID to upgrade *from*.
Only compatible with ``show_sql_only=True``.
:param show_sql_only: if True, migration statements will be printed but not executed.
:param session: sqlalchemy session with connection to Airflow metadata database
:return: None
"""
if from_revision and not show_sql_only:
raise AirflowException("`from_revision` is only supported with `show_sql_only=True`.")
# alembic adds significant import time, so we import it lazily
if not settings.SQL_ALCHEMY_CONN:
raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set. This is a critical assertion.")
from alembic import command
import_all_models()
config = _get_alembic_config()
if show_sql_only:
if not from_revision:
from_revision = _get_current_revision(session)
if not to_revision:
script = _get_script_object()
to_revision = script.get_current_head()
if to_revision == from_revision:
print_happy_cat("No migrations to apply; nothing to do.")
return
if not _revision_greater(config, to_revision, from_revision):
raise ValueError(
f"Requested *to* revision {to_revision} is older than *from* revision {from_revision}. "
"Please check your requested versions / revisions."
)
_revisions_above_min_for_offline(config=config, revisions=[from_revision, to_revision])
_offline_migration(command.upgrade, config, f"{from_revision}:{to_revision}")
return # only running sql; our job is done
errors_seen = False
for err in _check_migration_errors(session=session):
if not errors_seen:
log.error("Automatic migration is not available")
errors_seen = True
log.error("%s", err)
if errors_seen:
exit(1)
if not to_revision and not _get_current_revision(session=session):
# Don't load default connections
# New DB; initialize and exit
initdb(session=session, load_connections=False)
return
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
import sqlalchemy.pool
log.info("Creating tables")
val = os.environ.get("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
try:
# Reconfigure the ORM to use _EXACTLY_ one connection, otherwise some db engines hang forever
# trying to ALTER TABLEs
os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = "1"
settings.reconfigure_orm(pool_class=sqlalchemy.pool.SingletonThreadPool)
command.upgrade(config, revision=to_revision or "heads")
finally:
if val is None:
os.environ.pop("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
else:
os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = val
settings.reconfigure_orm()
if reserialize_dags:
_reserialize_dags(session=session)
add_default_pool_if_not_exists(session=session)
synchronize_log_template(session=session)
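# Illustrative sketch (assumption): generating offline migration SQL between two known
# revisions without touching the database, mirroring `airflow db upgrade` with
# `show_sql_only=True`. The revision ids are taken from REVISION_HEADS_MAP purely as an
# example of a planned 2.5.0 -> 2.6.3 upgrade.
def _example_generate_offline_upgrade_sql() -> None:
    upgradedb(
        from_revision=REVISION_HEADS_MAP["2.5.0"],
        to_revision=REVISION_HEADS_MAP["2.6.3"],
        show_sql_only=True,
    )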
@provide_session
def resetdb(session: Session = NEW_SESSION, skip_init: bool = False):
"""Clear out the database."""
if not settings.engine:
raise RuntimeError("The settings.engine must be set. This is a critical assertion")
log.info("Dropping tables that exist")
import_all_models()
connection = settings.engine.connect()
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
with connection.begin():
drop_airflow_models(connection)
drop_airflow_moved_tables(connection)
if not skip_init:
initdb(session=session)
@provide_session
def bootstrap_dagbag(session: Session = NEW_SESSION):
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
dagbag = DagBag()
# Save DAGs in the ORM
dagbag.sync_to_db(session=session)
# Deactivate the unknown ones
DAG.deactivate_unknown_dags(dagbag.dags.keys(), session=session)
@provide_session
def downgrade(*, to_revision, from_revision=None, show_sql_only=False, session: Session = NEW_SESSION):
"""
Downgrade the airflow metastore schema to a prior version.
:param to_revision: The alembic revision to downgrade *to*.
:param show_sql_only: if True, print sql statements but do not run them
:param from_revision: if supplied, alembic revision to downgrade *from*. This may only
be used in conjunction with ``show_sql_only=True`` because if we actually run the commands,
we should only downgrade from the *current* revision.
:param session: sqlalchemy session for connection to airflow metadata database
"""
if from_revision and not show_sql_only:
raise ValueError(
"`from_revision` can't be combined with `show_sql_only=False`. When actually "
"applying a downgrade (instead of just generating sql), we always "
"downgrade from current revision."
)
if not settings.SQL_ALCHEMY_CONN:
raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set.")
# alembic adds significant import time, so we import it lazily
from alembic import command
log.info("Attempting downgrade to revision %s", to_revision)
config = _get_alembic_config()
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
if show_sql_only:
log.warning("Generating sql scripts for manual migration.")
if not from_revision:
from_revision = _get_current_revision(session)
revision_range = f"{from_revision}:{to_revision}"
_offline_migration(command.downgrade, config=config, revision=revision_range)
else:
log.info("Applying downgrade migrations.")
command.downgrade(config, revision=to_revision, sql=show_sql_only)
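# Illustrative sketch (assumption): previewing the SQL for a downgrade to the 2.6.2 schema
# head without applying it. When actually downgrading, `from_revision` must be omitted so
# that the current database revision is used as the starting point.
def _example_preview_downgrade_sql() -> None:
    downgrade(to_revision=REVISION_HEADS_MAP["2.6.2"], show_sql_only=True)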
def drop_airflow_models(connection):
"""
Drops all airflow models.
:param connection: SQLAlchemy Connection
:return: None
"""
from airflow.models.base import Base
from airflow.www.fab_security.sqla.models import Model
Base.metadata.drop_all(connection)
Model.metadata.drop_all(connection)
db = _get_flask_db(connection.engine.url)
db.drop_all()
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
migration_ctx = MigrationContext.configure(connection)
version = migration_ctx._version
if inspect(connection).has_table(version.name):
version.drop(connection)
def drop_airflow_moved_tables(connection):
from airflow.models.base import Base
from airflow.settings import AIRFLOW_MOVED_TABLE_PREFIX
tables = set(inspect(connection).get_table_names())
to_delete = [Table(x, Base.metadata) for x in tables if x.startswith(AIRFLOW_MOVED_TABLE_PREFIX)]
for tbl in to_delete:
tbl.drop(settings.engine, checkfirst=False)
Base.metadata.remove(tbl)
@provide_session
def check(session: Session = NEW_SESSION):
"""
Checks if the database works.
:param session: SQLAlchemy session
"""
session.execute(text("select 1 as is_alive;"))
log.info("Connection successful.")
@enum.unique
class DBLocks(enum.IntEnum):
"""
Cross-db identifiers for advisory global database locks.
Postgres uses int64 lock ids, so we use the integer value; MySQL uses names, so we
call ``str()``, which is implemented using the ``_name_`` field.
"""
MIGRATIONS = enum.auto()
SCHEDULER_CRITICAL_SECTION = enum.auto()
def __str__(self):
return f"airflow_{self._name_}"
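# Illustrative sketch (assumption): the same DBLocks member yields an integer id for the
# Postgres advisory lock and a string name for MySQL GET_LOCK, exactly as used in
# create_global_lock below.
def _example_lock_identifiers() -> tuple[int, str]:
    return DBLocks.MIGRATIONS.value, str(DBLocks.MIGRATIONS)  # e.g. (1, "airflow_MIGRATIONS")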
@contextlib.contextmanager
def create_global_lock(
session: Session,
lock: DBLocks,
lock_timeout: int = 1800,
) -> Generator[None, None, None]:
"""Contextmanager that will create and teardown a global db lock."""
conn = session.get_bind().connect()
dialect = conn.dialect
try:
if dialect.name == "postgresql":
conn.execute(text("SET LOCK_TIMEOUT to :timeout"), {"timeout": lock_timeout})
conn.execute(text("SELECT pg_advisory_lock(:id)"), {"id": lock.value})
elif dialect.name == "mysql" and dialect.server_version_info >= (5, 6):
conn.execute(text("SELECT GET_LOCK(:id, :timeout)"), {"id": str(lock), "timeout": lock_timeout})
elif dialect.name == "mssql":
# TODO: make locking work for MSSQL
pass
yield
finally:
if dialect.name == "postgresql":
conn.execute(text("SET LOCK_TIMEOUT TO DEFAULT"))
(unlocked,) = conn.execute(text("SELECT pg_advisory_unlock(:id)"), {"id": lock.value}).fetchone()
if not unlocked:
raise RuntimeError("Error releasing DB lock!")
elif dialect.name == "mysql" and dialect.server_version_info >= (5, 6):
conn.execute(text("select RELEASE_LOCK(:id)"), {"id": str(lock)})
elif dialect.name == "mssql":
# TODO: make locking work for MSSQL
pass
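# Illustrative sketch (assumption): guarding a schema-changing operation with the global
# migrations lock so that concurrent `airflow db upgrade` invocations cannot interleave.
# The helper name is hypothetical.
def _example_with_migration_lock() -> None:
    with create_session() as session:
        with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
            log.info("Holding the migrations lock; safe to run schema changes here.")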
def compare_type(context, inspected_column, metadata_column, inspected_type, metadata_type):
"""
Compare types between ORM and DB.
Return False if the metadata_type is the same as the inspected_type,
None to allow the default implementation to compare these types,
or True to indicate that the two types do not match and should result
in a type change operation.
"""
if context.dialect.name == "mysql":
from sqlalchemy import String
from sqlalchemy.dialects import mysql
if isinstance(inspected_type, mysql.VARCHAR) and isinstance(metadata_type, String):
# This is a hack to get around MySQL VARCHAR collation
# not being possible to change from utf8_bin to utf8mb3_bin.
# We only make sure lengths are the same
if inspected_type.length != metadata_type.length:
return True
return False
return None
def compare_server_default(
context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default
):
"""
Compare server defaults between ORM and DB.
Return True if the defaults are different, False if not, or None to allow the default
implementation to compare these defaults.
Comparing server_default is not accurate in MSSQL because the
inspected_default above != metadata_default, while in Postgres/MySQL they are equal.
This is an issue with Alembic.
In SQLite, task_instance.map_index & task_reschedule.map_index do not compare accurately;
sometimes they are equal, sometimes they are not.
Alembic warns that this feature has varied accuracy depending on backends.
See: (https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.
environment.EnvironmentContext.configure.params.compare_server_default)
"""
dialect_name = context.connection.dialect.name
if dialect_name in ["mssql", "sqlite"]:
return False
if (
dialect_name == "mysql"
and metadata_column.name == "pool_slots"
and metadata_column.table.name == "task_instance"
):
# We removed server_default value in ORM to avoid expensive migration
# (it was removed in postgres DB in migration head 7b2661a43ba3 ).
# As a side note, server default value here was only actually needed for the migration
# where we added the column in the first place -- now that it exists and all
# existing rows are populated with a value this server default is never used.
return False
return None
def get_sqla_model_classes():
"""
Get all SQLAlchemy class mappers.
SQLAlchemy < 1.4 does not support registry.mappers so we use
try/except to handle it.
"""
from airflow.models.base import Base
try:
return [mapper.class_ for mapper in Base.registry.mappers]
except AttributeError:
return Base._decl_class_registry.values()
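# Illustrative usage sketch added for clarity; not part of the original module:
# list the table behind every mapped ORM class.
def _example_list_model_tables() -> list[str]:
    return sorted(c.__tablename__ for c in get_sqla_model_classes() if hasattr(c, "__tablename__"))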
| 63,605 | 32.9232 | 110 |
py
|
airflow
|
airflow-main/airflow/utils/process_utils.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for running or stopping processes."""
from __future__ import annotations
import errno
import logging
import os
import select
import shlex
import signal
import subprocess
import sys
from airflow.utils.platform import IS_WINDOWS
if not IS_WINDOWS:
import pty
import termios
import tty
from contextlib import contextmanager
from typing import Generator
import psutil
from lockfile.pidlockfile import PIDLockFile
from airflow.configuration import conf
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
# When killing processes, time to wait after issuing a SIGTERM before issuing a
# SIGKILL.
DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM = conf.getint("core", "KILLED_TASK_CLEANUP_TIME")
def reap_process_group(
process_group_id: int,
logger,
sig: signal.Signals = signal.SIGTERM,
timeout: int = DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM,
) -> dict[int, int]:
"""
Send sig (SIGTERM) to the process group of pid.
Tries really hard to terminate all processes in the group (including grandchildren). Will send
sig (SIGTERM) to the process group of pid. If any process is still alive after timeout,
a SIGKILL will be sent.
:param process_group_id: process group id to kill.
The process that wants to create the group should run
`airflow.utils.process_utils.set_new_process_group()` as the first command
it executes which will set group id = process_id. Effectively the process that is the
"root" of the group has pid = gid and all other processes in the group have different
pids but the same gid (equal the pid of the root process)
:param logger: log handler
:param sig: signal type
:param timeout: how much time a process has to terminate
"""
returncodes = {}
def on_terminate(p):
logger.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode)
returncodes[p.pid] = p.returncode
def signal_procs(sig):
if IS_WINDOWS:
return
try:
logger.info("Sending the signal %s to group %s", sig, process_group_id)
os.killpg(process_group_id, sig)
except OSError as err_killpg:
# If operation not permitted error is thrown due to run_as_user,
# use sudo -n(--non-interactive) to kill the process
if err_killpg.errno == errno.EPERM:
subprocess.check_call(
["sudo", "-n", "kill", "-" + str(int(sig))]
+ [str(p.pid) for p in all_processes_in_the_group]
)
elif err_killpg.errno == errno.ESRCH:
# There is a rare condition where the process has not yet managed to change its process
# group. In this case os.killpg fails with an ESRCH error,
# so we additionally send a kill signal to the process itself.
logger.info(
"Sending the signal %s to process %s as process group is missing.", sig, process_group_id
)
try:
os.kill(process_group_id, sig)
except OSError as err_kill:
if err_kill.errno == errno.EPERM:
subprocess.check_call(["sudo", "-n", "kill", "-" + str(process_group_id)])
else:
raise
else:
raise
if not IS_WINDOWS and process_group_id == os.getpgid(0):
raise RuntimeError("I refuse to kill myself")
try:
parent = psutil.Process(process_group_id)
all_processes_in_the_group = parent.children(recursive=True)
all_processes_in_the_group.append(parent)
except psutil.NoSuchProcess:
# The process already exited, but maybe its children haven't.
all_processes_in_the_group = []
for proc in psutil.process_iter():
try:
if os.getpgid(proc.pid) == process_group_id and proc.pid != 0:
all_processes_in_the_group.append(proc)
except OSError:
pass
logger.info(
"Sending %s to group %s. PIDs of all processes in the group: %s",
sig,
process_group_id,
[p.pid for p in all_processes_in_the_group],
)
try:
signal_procs(sig)
except OSError as err:
# No such process, which means there is no such process group - our job
# is done
if err.errno == errno.ESRCH:
return returncodes
_, alive = psutil.wait_procs(all_processes_in_the_group, timeout=timeout, callback=on_terminate)
if alive:
for proc in alive:
logger.warning("process %s did not respond to SIGTERM. Trying SIGKILL", proc)
try:
signal_procs(signal.SIGKILL)
except OSError as err:
if err.errno != errno.ESRCH:
raise
_, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
for proc in alive:
logger.error("Process %s (%s) could not be killed. Giving up.", proc, proc.pid)
return returncodes
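# Illustrative usage sketch added for clarity; not part of the original module.
# It assumes the child process called set_new_process_group() on startup, so its pid
# doubles as the process group id expected by reap_process_group().
def _example_reap_group(child_pid: int) -> dict[int, int]:
    return reap_process_group(child_pid, log, sig=signal.SIGTERM, timeout=60)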
def execute_in_subprocess(cmd: list[str], cwd: str | None = None) -> None:
"""
Execute a process and stream output to logger.
:param cmd: command and arguments to run
:param cwd: Current working directory passed to the Popen constructor
"""
execute_in_subprocess_with_kwargs(cmd, cwd=cwd)
def execute_in_subprocess_with_kwargs(cmd: list[str], **kwargs) -> None:
"""
Execute a process and stream output to logger.
:param cmd: command and arguments to run
All other keyword args will be passed directly to subprocess.Popen
"""
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, close_fds=True, **kwargs
) as proc:
log.info("Output:")
if proc.stdout:
with proc.stdout:
for line in iter(proc.stdout.readline, b""):
log.info("%s", line.decode().rstrip())
exit_code = proc.wait()
if exit_code != 0:
raise subprocess.CalledProcessError(exit_code, cmd)
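# Illustrative usage sketch added for clarity; not part of the original module.
# Assumes a POSIX environment where "echo" is available on PATH.
def _example_execute_in_subprocess() -> None:
    execute_in_subprocess(["echo", "hello from a subprocess"], cwd="/tmp")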
def execute_interactive(cmd: list[str], **kwargs) -> None:
"""
Run the new command as a subprocess.
Runs the new command as a subprocess and ensures that the terminal's state is restored to its original
state after the process completes, e.g. if the subprocess hides the cursor, it will be restored once
the process finishes.
"""
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())
# open pseudo-terminal to interact with subprocess
primary_fd, secondary_fd = pty.openpty()
try:
# use os.setsid() make it run in a new process group, or bash job control will not be enabled
with subprocess.Popen(
cmd,
stdin=secondary_fd,
stdout=secondary_fd,
stderr=secondary_fd,
universal_newlines=True,
**kwargs,
) as proc:
while proc.poll() is None:
readable_fbs, _, _ = select.select([sys.stdin, primary_fd], [], [])
if sys.stdin in readable_fbs:
input_data = os.read(sys.stdin.fileno(), 10240)
os.write(primary_fd, input_data)
if primary_fd in readable_fbs:
output_data = os.read(primary_fd, 10240)
if output_data:
os.write(sys.stdout.fileno(), output_data)
finally:
# restore tty settings back
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
def kill_child_processes_by_pids(pids_to_kill: list[int], timeout: int = 5) -> None:
"""
Kill child processes of the current process.
First, it sends the SIGTERM signal, and after the time specified by the `timeout` parameter, sends
the SIGKILL signal if the process is still alive.
:param pids_to_kill: List of PID to be killed.
:param timeout: The time to wait before sending the SIGKILL signal.
"""
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [
x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
]
# First try SIGTERM
for child in child_processes:
log.info("Terminating child PID: %s", child.pid)
child.terminate()
log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout, callback=lambda x: log.info("Terminated PID %s", x.pid)
)
except psutil.TimeoutExpired:
log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [
x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
]
if child_processes:
log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
@contextmanager
def patch_environ(new_env_variables: dict[str, str]) -> Generator[None, None, None]:
"""
Set environment variables in context.
After leaving the context, it restores its original state.
:param new_env_variables: Environment variables to set
"""
current_env_state = {key: os.environ.get(key) for key in new_env_variables.keys()}
os.environ.update(new_env_variables)
try:
yield
finally:
for key, old_value in current_env_state.items():
if old_value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = old_value
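# Illustrative usage sketch added for clarity; not part of the original module:
# temporarily override an environment variable; the previous value (or its absence)
# is restored when the block exits.
def _example_patch_environ() -> None:
    with patch_environ({"AIRFLOW__CORE__LOAD_EXAMPLES": "False"}):
        assert os.environ["AIRFLOW__CORE__LOAD_EXAMPLES"] == "False"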
def check_if_pidfile_process_is_running(pid_file: str, process_name: str):
"""
Check if a pidfile already exists and whether the process is still running.
If the process is dead, the pidfile is removed.
:param pid_file: path to the pidfile
:param process_name: name used in exception if process is up and
running
"""
pid_lock_file = PIDLockFile(path=pid_file)
# If file exists
if pid_lock_file.is_locked():
# Read the pid
pid = pid_lock_file.read_pid()
if pid is None:
return
try:
# Check if process is still running
proc = psutil.Process(pid)
if proc.is_running():
raise AirflowException(f"The {process_name} is already running under PID {pid}.")
except psutil.NoSuchProcess:
# If process is dead remove the pidfile
pid_lock_file.break_lock()
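# Illustrative usage sketch added for clarity; not part of the original module.
# The pidfile path and process name below are hypothetical.
def _example_check_pidfile() -> None:
    check_if_pidfile_process_is_running("/tmp/example-daemon.pid", process_name="example daemon")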
def set_new_process_group() -> None:
"""Try to set current process to a new process group.
That makes it easy to kill all sub-processes of this process at the OS level,
rather than having to iterate the child processes.
If the current process was spawned by the system call ``exec()``, the current
process group is kept.
"""
if os.getpid() == os.getsid(0):
# If PID == SID then the process is a session leader, and it is not possible to change its process group
return
os.setpgid(0, 0)
| 12,375 | 35.293255 | 109 |
py
|
airflow
|
airflow-main/airflow/utils/db_cleanup.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module took inspiration from the community maintenance dag.
See:
(https://github.com/teamclairvoyant/airflow-maintenance-dags/blob/4e5c7682a808082561d60cbc9cafaa477b0d8c65/db-cleanup/airflow-db-cleanup.py).
"""
from __future__ import annotations
import csv
import logging
import os
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any
from pendulum import DateTime
from sqlalchemy import and_, column, false, func, inspect, table, text
from sqlalchemy.exc import OperationalError, ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.orm import Query, Session, aliased
from sqlalchemy.sql.expression import ClauseElement, Executable, tuple_
from airflow import AirflowException
from airflow.cli.simple_table import AirflowConsole
from airflow.models import Base
from airflow.utils import timezone
from airflow.utils.db import reflect_tables
from airflow.utils.helpers import ask_yesno
from airflow.utils.session import NEW_SESSION, provide_session
logger = logging.getLogger(__file__)
ARCHIVE_TABLE_PREFIX = "_airflow_deleted__"
@dataclass
class _TableConfig:
"""
Config class for performing cleanup on a table.
:param table_name: the table
:param extra_columns: any columns besides recency_column_name that we'll need in queries
:param recency_column_name: date column to filter by
:param keep_last: whether the last record should be kept even if it's older than clean_before_timestamp
:param keep_last_filters: the "keep last" functionality will preserve the most recent record
in the table. To ignore certain records even if they are the latest in the table, you can
supply additional filters here (e.g. externally triggered dag runs)
:param keep_last_group_by: if keeping the last record, can keep the last record for each group
"""
table_name: str
recency_column_name: str
extra_columns: list[str] | None = None
keep_last: bool = False
keep_last_filters: Any | None = None
keep_last_group_by: Any | None = None
def __post_init__(self):
self.recency_column = column(self.recency_column_name)
self.orm_model: Base = table(
self.table_name, *[column(x) for x in self.extra_columns or []], self.recency_column
)
def __lt__(self, other):
return self.table_name < other.table_name
@property
def readable_config(self):
return dict(
table=self.orm_model.name,
recency_column=str(self.recency_column),
keep_last=self.keep_last,
keep_last_filters=[str(x) for x in self.keep_last_filters] if self.keep_last_filters else None,
keep_last_group_by=str(self.keep_last_group_by),
)
config_list: list[_TableConfig] = [
_TableConfig(table_name="job", recency_column_name="latest_heartbeat"),
_TableConfig(table_name="dag", recency_column_name="last_parsed_time"),
_TableConfig(
table_name="dag_run",
recency_column_name="start_date",
extra_columns=["dag_id", "external_trigger"],
keep_last=True,
keep_last_filters=[column("external_trigger") == false()],
keep_last_group_by=["dag_id"],
),
_TableConfig(table_name="dataset_event", recency_column_name="timestamp"),
_TableConfig(table_name="import_error", recency_column_name="timestamp"),
_TableConfig(table_name="log", recency_column_name="dttm"),
_TableConfig(table_name="sla_miss", recency_column_name="timestamp"),
_TableConfig(table_name="task_fail", recency_column_name="start_date"),
_TableConfig(table_name="task_instance", recency_column_name="start_date"),
_TableConfig(table_name="task_reschedule", recency_column_name="start_date"),
_TableConfig(table_name="xcom", recency_column_name="timestamp"),
_TableConfig(table_name="callback_request", recency_column_name="created_at"),
_TableConfig(table_name="celery_taskmeta", recency_column_name="date_done"),
_TableConfig(table_name="celery_tasksetmeta", recency_column_name="date_done"),
]
config_dict: dict[str, _TableConfig] = {x.orm_model.name: x for x in sorted(config_list)}
def _check_for_rows(*, query: Query, print_rows=False):
num_entities = query.count()
print(f"Found {num_entities} rows meeting deletion criteria.")
if print_rows:
max_rows_to_print = 100
if num_entities > 0:
print(f"Printing first {max_rows_to_print} rows.")
logger.debug("print entities query: %s", query)
for entry in query.limit(max_rows_to_print):
print(entry.__dict__)
return num_entities
def _dump_table_to_file(*, target_table, file_path, export_format, session):
if export_format == "csv":
with open(file_path, "w") as f:
csv_writer = csv.writer(f)
cursor = session.execute(text(f"SELECT * FROM {target_table}"))
csv_writer.writerow(cursor.keys())
csv_writer.writerows(cursor.fetchall())
else:
raise AirflowException(f"Export format {export_format} is not supported.")
def _do_delete(*, query, orm_model, skip_archive, session):
from datetime import datetime
import re2
print("Performing Delete...")
# using bulk delete
# create a new table and copy the rows there
timestamp_str = re2.sub(r"[^\d]", "", datetime.utcnow().isoformat())[:14]
target_table_name = f"{ARCHIVE_TABLE_PREFIX}{orm_model.name}__{timestamp_str}"
print(f"Moving data to table {target_table_name}")
bind = session.get_bind()
dialect_name = bind.dialect.name
if dialect_name == "mysql":
# MySQL with replication needs this split into two queries, so just do it for all MySQL
# ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT.
session.execute(text(f"CREATE TABLE {target_table_name} LIKE {orm_model.name}"))
metadata = reflect_tables([target_table_name], session)
target_table = metadata.tables[target_table_name]
insert_stm = target_table.insert().from_select(target_table.c, query)
logger.debug("insert statement:\n%s", insert_stm.compile())
session.execute(insert_stm)
else:
stmt = CreateTableAs(target_table_name, query.selectable)
logger.debug("ctas query:\n%s", stmt.compile())
session.execute(stmt)
session.commit()
# delete the rows from the old table
metadata = reflect_tables([orm_model.name, target_table_name], session)
source_table = metadata.tables[orm_model.name]
target_table = metadata.tables[target_table_name]
logger.debug("rows moved; purging from %s", source_table.name)
if dialect_name == "sqlite":
pk_cols = source_table.primary_key.columns
delete = source_table.delete().where(
tuple_(*pk_cols).in_(
session.query(*[target_table.c[x.name] for x in source_table.primary_key.columns]).subquery()
)
)
else:
delete = source_table.delete().where(
and_(col == target_table.c[col.name] for col in source_table.primary_key.columns)
)
logger.debug("delete statement:\n%s", delete.compile())
session.execute(delete)
session.commit()
if skip_archive:
target_table.drop()
session.commit()
print("Finished Performing Delete")
def _subquery_keep_last(*, recency_column, keep_last_filters, group_by_columns, max_date_colname, session):
subquery = session.query(*group_by_columns, func.max(recency_column).label(max_date_colname))
if keep_last_filters is not None:
for entry in keep_last_filters:
subquery = subquery.filter(entry)
if group_by_columns is not None:
subquery = subquery.group_by(*group_by_columns)
return subquery.subquery(name="latest")
class CreateTableAs(Executable, ClauseElement):
"""Custom sqlalchemy clause element for CTAS operations."""
def __init__(self, name, query):
self.name = name
self.query = query
@compiles(CreateTableAs)
def _compile_create_table_as__other(element, compiler, **kw):
return f"CREATE TABLE {element.name} AS {compiler.process(element.query)}"
@compiles(CreateTableAs, "mssql")
def _compile_create_table_as__mssql(element, compiler, **kw):
return f"WITH cte AS ( {compiler.process(element.query)} ) SELECT * INTO {element.name} FROM cte"
def _build_query(
*,
orm_model,
recency_column,
keep_last,
keep_last_filters,
keep_last_group_by,
clean_before_timestamp,
session,
**kwargs,
):
base_table_alias = "base"
base_table = aliased(orm_model, name=base_table_alias)
query = session.query(base_table).with_entities(text(f"{base_table_alias}.*"))
base_table_recency_col = base_table.c[recency_column.name]
conditions = [base_table_recency_col < clean_before_timestamp]
if keep_last:
max_date_col_name = "max_date_per_group"
group_by_columns = [column(x) for x in keep_last_group_by]
subquery = _subquery_keep_last(
recency_column=recency_column,
keep_last_filters=keep_last_filters,
group_by_columns=group_by_columns,
max_date_colname=max_date_col_name,
session=session,
)
query = query.select_from(base_table).outerjoin(
subquery,
and_(
*[base_table.c[x] == subquery.c[x] for x in keep_last_group_by],
base_table_recency_col == column(max_date_col_name),
),
)
conditions.append(column(max_date_col_name).is_(None))
query = query.filter(and_(*conditions))
return query
def _cleanup_table(
*,
orm_model,
recency_column,
keep_last,
keep_last_filters,
keep_last_group_by,
clean_before_timestamp,
dry_run=True,
verbose=False,
skip_archive=False,
session,
**kwargs,
):
print()
if dry_run:
print(f"Performing dry run for table {orm_model.name}")
query = _build_query(
orm_model=orm_model,
recency_column=recency_column,
keep_last=keep_last,
keep_last_filters=keep_last_filters,
keep_last_group_by=keep_last_group_by,
clean_before_timestamp=clean_before_timestamp,
session=session,
)
logger.debug("old rows query:\n%s", query.selectable.compile())
print(f"Checking table {orm_model.name}")
num_rows = _check_for_rows(query=query, print_rows=False)
if num_rows and not dry_run:
_do_delete(query=query, orm_model=orm_model, skip_archive=skip_archive, session=session)
session.commit()
def _confirm_delete(*, date: DateTime, tables: list[str]):
for_tables = f" for tables {tables!r}" if tables else ""
question = (
f"You have requested that we purge all data prior to {date}{for_tables}.\n"
f"This is irreversible. Consider backing up the tables first and / or doing a dry run "
f"with option --dry-run.\n"
f"Enter 'delete rows' (without quotes) to proceed."
)
print(question)
answer = input().strip()
if not answer == "delete rows":
raise SystemExit("User did not confirm; exiting.")
def _confirm_drop_archives(*, tables: list[str]):
# if length of tables is greater than 3, show the total count
if len(tables) > 3:
text_ = f"{len(tables)} archived tables prefixed with {ARCHIVE_TABLE_PREFIX}"
else:
text_ = f"the following archived tables {tables}"
question = (
f"You have requested that we drop {text_}.\n"
f"This is irreversible. Consider backing up the tables first \n"
)
print(question)
if len(tables) > 3:
show_tables = ask_yesno("Show tables? (y/n): ")
if show_tables:
print(tables, "\n")
answer = input("Enter 'drop archived tables' (without quotes) to proceed.\n").strip()
if not answer == "drop archived tables":
raise SystemExit("User did not confirm; exiting.")
def _print_config(*, configs: dict[str, _TableConfig]):
data = [x.readable_config for x in configs.values()]
AirflowConsole().print_as_table(data=data)
@contextmanager
def _suppress_with_logging(table, session):
"""
Suppress OperationalError and ProgrammingError, logging them instead of failing.
If the session has an active transaction, it is rolled back so later tables can still be cleaned.
"""
try:
yield
except (OperationalError, ProgrammingError):
logger.warning("Encountered error when attempting to clean table '%s'. ", table)
logger.debug("Traceback for table '%s'", table, exc_info=True)
if session.is_active:
logger.debug("Rolling back transaction")
session.rollback()
def _effective_table_names(*, table_names: list[str] | None):
desired_table_names = set(table_names or config_dict)
effective_config_dict = {k: v for k, v in config_dict.items() if k in desired_table_names}
effective_table_names = set(effective_config_dict)
if desired_table_names != effective_table_names:
outliers = desired_table_names - effective_table_names
logger.warning(
"The following table(s) are not valid choices and will be skipped: %s", sorted(outliers)
)
if not effective_table_names:
raise SystemExit("No tables selected for db cleanup. Please choose valid table names.")
return effective_table_names, effective_config_dict
def _get_archived_table_names(table_names, session):
inspector = inspect(session.bind)
db_table_names = [x for x in inspector.get_table_names() if x.startswith(ARCHIVE_TABLE_PREFIX)]
effective_table_names, _ = _effective_table_names(table_names=table_names)
# Keep only archive tables that correspond to the selected (effective) table names
archived_table_names = [
table_name
for table_name in db_table_names
if any("__" + x + "__" in table_name for x in effective_table_names)
]
return archived_table_names
@provide_session
def run_cleanup(
*,
clean_before_timestamp: DateTime,
table_names: list[str] | None = None,
dry_run: bool = False,
verbose: bool = False,
confirm: bool = True,
skip_archive: bool = False,
session: Session = NEW_SESSION,
):
"""
Purges old records in the Airflow metadata database.
The last non-externally-triggered dag run will always be kept in order to ensure
continuity of scheduled dag runs.
Where there are foreign key relationships, deletes will cascade, so that for
example if you clean up old dag runs, the associated task instances will
be deleted.
:param clean_before_timestamp: The timestamp before which data should be purged
:param table_names: Optional. List of table names to perform maintenance on. If list not provided,
will perform maintenance on all tables.
:param dry_run: If true, print rows meeting deletion criteria
:param verbose: If true, may provide more detailed output.
:param confirm: Require user input to confirm before processing deletions.
:param skip_archive: Set to True if you don't want the purged rows preserved in an archive table.
:param session: Session representing connection to the metadata database.
"""
clean_before_timestamp = timezone.coerce_datetime(clean_before_timestamp)
effective_table_names, effective_config_dict = _effective_table_names(table_names=table_names)
if dry_run:
print("Performing dry run for db cleanup.")
print(
f"Data prior to {clean_before_timestamp} would be purged "
f"from tables {effective_table_names} with the following config:\n"
)
_print_config(configs=effective_config_dict)
if not dry_run and confirm:
_confirm_delete(date=clean_before_timestamp, tables=sorted(effective_table_names))
existing_tables = reflect_tables(tables=None, session=session).tables
for table_name, table_config in effective_config_dict.items():
if table_name not in existing_tables:
logger.warning("Table %s not found. Skipping.", table_name)
continue
with _suppress_with_logging(table_name, session):
_cleanup_table(
clean_before_timestamp=clean_before_timestamp,
dry_run=dry_run,
verbose=verbose,
**table_config.__dict__,
skip_archive=skip_archive,
session=session,
)
session.commit()
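# Illustrative usage sketch added for clarity; not part of the original module:
# preview (dry run) a purge of log and task_instance rows older than 90 days.
def _example_run_cleanup() -> None:
    from datetime import timedelta

    run_cleanup(
        clean_before_timestamp=timezone.utcnow() - timedelta(days=90),
        table_names=["log", "task_instance"],
        dry_run=True,  # set dry_run=False (and keep confirm=False) to actually archive and purge
        confirm=False,
    )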
@provide_session
def export_archived_records(
export_format,
output_path,
table_names=None,
drop_archives=False,
needs_confirm=True,
session: Session = NEW_SESSION,
):
"""Export archived data to the given output path in the given format."""
archived_table_names = _get_archived_table_names(table_names, session)
# If the user chose to drop archives, check that archive tables exist
# before asking for confirmation
if drop_archives and archived_table_names and needs_confirm:
_confirm_drop_archives(tables=sorted(archived_table_names))
export_count = 0
dropped_count = 0
for table_name in archived_table_names:
logger.info("Exporting table %s", table_name)
_dump_table_to_file(
target_table=table_name,
file_path=os.path.join(output_path, f"{table_name}.{export_format}"),
export_format=export_format,
session=session,
)
export_count += 1
if drop_archives:
logger.info("Dropping archived table %s", table_name)
session.execute(text(f"DROP TABLE {table_name}"))
dropped_count += 1
logger.info("Total exported tables: %s, Total dropped tables: %s", export_count, dropped_count)
@provide_session
def drop_archived_tables(table_names, needs_confirm, session):
"""Drop archived tables."""
archived_table_names = _get_archived_table_names(table_names, session)
if needs_confirm and archived_table_names:
_confirm_drop_archives(tables=sorted(archived_table_names))
dropped_count = 0
for table_name in archived_table_names:
logger.info("Dropping archived table %s", table_name)
session.execute(text(f"DROP TABLE {table_name}"))
dropped_count += 1
logger.info("Total dropped tables: %s", dropped_count)
| 19,152 | 38.167689 | 141 |
py
|
airflow
|
airflow-main/airflow/utils/code_utils.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
import inspect
import os
from typing import Any
def get_python_source(x: Any) -> str | None:
"""Helper function to get Python source (or not), preventing exceptions."""
if isinstance(x, str):
return x
if x is None:
return None
source_code = None
if isinstance(x, functools.partial):
source_code = inspect.getsource(x.func)
if source_code is None:
try:
source_code = inspect.getsource(x)
except TypeError:
pass
if source_code is None:
try:
source_code = inspect.getsource(x.__call__)
except (TypeError, AttributeError):
pass
if source_code is None:
source_code = f"No source code available for {type(x)}"
return source_code
def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:
"""
Prepare code snippet with line numbers and a specific line marked.
:param file_path: File name
:param line_no: Line number
:param context_lines_count: The number of lines that will be cut before and after.
:return: str
"""
with open(file_path) as text_file:
# Highlight code
code = text_file.read()
code_lines = code.split("\n")
# Prepend line number
code_lines = [
f">{lno:3} | {line}" if line_no == lno else f"{lno:4} | {line}"
for lno, line in enumerate(code_lines, 1)
]
# Cut out the snippet
start_line_no = max(0, line_no - context_lines_count - 1)
end_line_no = line_no + context_lines_count
code_lines = code_lines[start_line_no:end_line_no]
# Join lines
code = "\n".join(code_lines)
return code
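# Illustrative usage sketch added for clarity; not part of the original module:
# print this very file around line 10 with three context lines on each side.
def _example_prepare_code_snippet() -> None:
    print(prepare_code_snippet(__file__, line_no=10, context_lines_count=3))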
def get_terminal_formatter(**opts):
"""Returns the best formatter available in the current terminal."""
if "256" in os.environ.get("TERM", ""):
from pygments.formatters.terminal256 import Terminal256Formatter
formatter = Terminal256Formatter(**opts)
else:
from pygments.formatters.terminal import TerminalFormatter
formatter = TerminalFormatter(**opts)
return formatter
| 3,001 | 31.27957 | 92 |
py
|
airflow
|
airflow-main/airflow/utils/airflow_flask_app.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, cast
from flask import Flask
from airflow.models.dagbag import DagBag
from airflow.www.extensions.init_appbuilder import AirflowAppBuilder
class AirflowApp(Flask):
"""Airflow Flask Application."""
appbuilder: AirflowAppBuilder
dag_bag: DagBag
api_auth: list[Any]
def get_airflow_app() -> AirflowApp:
from flask import current_app
return cast(AirflowApp, current_app)
| 1,244 | 30.923077 | 68 |
py
|
airflow
|
airflow-main/airflow/utils/task_group.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of closely related tasks on the same DAG that should be grouped together visually."""
from __future__ import annotations
import copy
import functools
import operator
import weakref
from typing import TYPE_CHECKING, Any, Generator, Iterator, Sequence
import re2
from airflow.compat.functools import cache
from airflow.exceptions import (
AirflowDagCycleException,
AirflowException,
DuplicateTaskIdFound,
TaskAlreadyInTaskGroup,
)
from airflow.models.taskmixin import DAGNode, DependencyMixin
from airflow.serialization.enums import DagAttributeTypes
from airflow.utils.helpers import validate_group_key
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.models.expandinput import ExpandInput
from airflow.models.operator import Operator
from airflow.utils.edgemodifier import EdgeModifier
class TaskGroup(DAGNode):
"""
A collection of tasks.
When set_downstream() or set_upstream() are called on the TaskGroup, it is applied across
all tasks within the group if necessary.
:param group_id: a unique, meaningful id for the TaskGroup. group_id must not conflict
with group_id of TaskGroup or task_id of tasks in the DAG. Root TaskGroup has group_id
set to None.
:param prefix_group_id: If set to True, child task_id and group_id will be prefixed with
this TaskGroup's group_id. If set to False, child task_id and group_id are not prefixed.
Default is True.
:param parent_group: The parent TaskGroup of this TaskGroup. parent_group is set to None
for the root TaskGroup.
:param dag: The DAG that this TaskGroup belongs to.
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators,
will override default_args defined in the DAG level.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:param tooltip: The tooltip of the TaskGroup node when displayed in the UI
:param ui_color: The fill color of the TaskGroup node when displayed in the UI
:param ui_fgcolor: The label color of the TaskGroup node when displayed in the UI
:param add_suffix_on_collision: If this task group name already exists,
automatically add `__1` etc suffixes
"""
used_group_ids: set[str | None]
def __init__(
self,
group_id: str | None,
prefix_group_id: bool = True,
parent_group: TaskGroup | None = None,
dag: DAG | None = None,
default_args: dict[str, Any] | None = None,
tooltip: str = "",
ui_color: str = "CornflowerBlue",
ui_fgcolor: str = "#000",
add_suffix_on_collision: bool = False,
):
from airflow.models.dag import DagContext
self.prefix_group_id = prefix_group_id
self.default_args = copy.deepcopy(default_args or {})
dag = dag or DagContext.get_current_dag()
if group_id is None:
# This creates a root TaskGroup.
if parent_group:
raise AirflowException("Root TaskGroup cannot have parent_group")
# used_group_ids is shared across all TaskGroups in the same DAG to keep track
# of used group_id to avoid duplication.
self.used_group_ids = set()
self.dag = dag
else:
if prefix_group_id:
# If group id is used as prefix, it should not contain spaces nor dots
# because it is used as prefix in the task_id
validate_group_key(group_id)
else:
if not isinstance(group_id, str):
raise ValueError("group_id must be str")
if not group_id:
raise ValueError("group_id must not be empty")
if not parent_group and not dag:
raise AirflowException("TaskGroup can only be used inside a dag")
parent_group = parent_group or TaskGroupContext.get_current_task_group(dag)
if not parent_group:
raise AirflowException("TaskGroup must have a parent_group except for the root TaskGroup")
if dag is not parent_group.dag:
raise RuntimeError(
f"Cannot mix TaskGroups from different DAGs: {dag} and {parent_group.dag}"
)
self.used_group_ids = parent_group.used_group_ids
# if given group_id already used assign suffix by incrementing largest used suffix integer
# Example : task_group ==> task_group__1 -> task_group__2 -> task_group__3
self._group_id = group_id
self._check_for_group_id_collisions(add_suffix_on_collision)
self.children: dict[str, DAGNode] = {}
if parent_group:
parent_group.add(self)
self._update_default_args(parent_group)
self.used_group_ids.add(self.group_id)
if self.group_id:
self.used_group_ids.add(self.downstream_join_id)
self.used_group_ids.add(self.upstream_join_id)
self.tooltip = tooltip
self.ui_color = ui_color
self.ui_fgcolor = ui_fgcolor
# Keep track of TaskGroups or tasks that depend on this entire TaskGroup separately
# so that we can optimize the number of edges when entire TaskGroups depend on each other.
self.upstream_group_ids: set[str | None] = set()
self.downstream_group_ids: set[str | None] = set()
self.upstream_task_ids = set()
self.downstream_task_ids = set()
def _check_for_group_id_collisions(self, add_suffix_on_collision: bool):
if self._group_id is None:
return
# if given group_id already used assign suffix by incrementing largest used suffix integer
# Example : task_group ==> task_group__1 -> task_group__2 -> task_group__3
if self._group_id in self.used_group_ids:
if not add_suffix_on_collision:
raise DuplicateTaskIdFound(f"group_id '{self._group_id}' has already been added to the DAG")
base = re2.split(r"__\d+$", self._group_id)[0]
suffixes = sorted(
int(re2.split(r"^.+__", used_group_id)[1])
for used_group_id in self.used_group_ids
if used_group_id is not None and re2.match(rf"^{base}__\d+$", used_group_id)
)
if not suffixes:
self._group_id += "__1"
else:
self._group_id = f"{base}__{suffixes[-1] + 1}"
def _update_default_args(self, parent_group: TaskGroup):
if parent_group.default_args:
self.default_args = {**parent_group.default_args, **self.default_args}
@classmethod
def create_root(cls, dag: DAG) -> TaskGroup:
"""Create a root TaskGroup with no group_id or parent."""
return cls(group_id=None, dag=dag)
@property
def node_id(self):
return self.group_id
@property
def is_root(self) -> bool:
"""Returns True if this TaskGroup is the root TaskGroup. Otherwise False."""
return not self.group_id
@property
def parent_group(self) -> TaskGroup | None:
return self.task_group
def __iter__(self):
for child in self.children.values():
if isinstance(child, TaskGroup):
yield from child
else:
yield child
def add(self, task: DAGNode) -> DAGNode:
"""Add a task to this TaskGroup.
:meta private:
"""
from airflow.models.abstractoperator import AbstractOperator
if TaskGroupContext.active:
if task.task_group and task.task_group != self:
task.task_group.children.pop(task.node_id, None)
task.task_group = self
existing_tg = task.task_group
if isinstance(task, AbstractOperator) and existing_tg is not None and existing_tg != self:
raise TaskAlreadyInTaskGroup(task.node_id, existing_tg.node_id, self.node_id)
# Set the TG first, as setting it might change the return value of node_id!
task.task_group = weakref.proxy(self)
key = task.node_id
if key in self.children:
node_type = "Task" if hasattr(task, "task_id") else "Task Group"
raise DuplicateTaskIdFound(f"{node_type} id '{key}' has already been added to the DAG")
if isinstance(task, TaskGroup):
if self.dag:
if task.dag is not None and self.dag is not task.dag:
raise RuntimeError(
f"Cannot mix TaskGroups from different DAGs: {self.dag} and {task.dag}"
)
task.dag = self.dag
if task.children:
raise AirflowException("Cannot add a non-empty TaskGroup")
self.children[key] = task
return task
def _remove(self, task: DAGNode) -> None:
key = task.node_id
if key not in self.children:
raise KeyError(f"Node id {key!r} not part of this task group")
self.used_group_ids.remove(key)
del self.children[key]
@property
def group_id(self) -> str | None:
"""group_id of this TaskGroup."""
if self.task_group and self.task_group.prefix_group_id and self.task_group._group_id:
# defer to parent whether it adds a prefix
return self.task_group.child_id(self._group_id)
return self._group_id
@property
def label(self) -> str | None:
"""group_id excluding parent's group_id used as the node label in UI."""
return self._group_id
def update_relative(
self, other: DependencyMixin, upstream: bool = True, edge_modifier: EdgeModifier | None = None
) -> None:
"""
Overrides TaskMixin.update_relative.
Update upstream_group_ids/downstream_group_ids/upstream_task_ids/downstream_task_ids
accordingly so that we can reduce the number of edges when displaying Graph view.
"""
if isinstance(other, TaskGroup):
# Handles setting relationship between a TaskGroup and another TaskGroup
if upstream:
parent, child = (self, other)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, other.downstream_join_id, self.upstream_join_id)
else:
parent, child = (other, self)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, self.downstream_join_id, other.upstream_join_id)
parent.upstream_group_ids.add(child.group_id)
child.downstream_group_ids.add(parent.group_id)
else:
# Handles setting relationship between a TaskGroup and a task
for task in other.roots:
if not isinstance(task, DAGNode):
raise AirflowException(
"Relationships can only be set between TaskGroup "
f"or operators; received {task.__class__.__name__}"
)
# Do not set a relationship between a TaskGroup and a Label's roots
if self == task:
continue
if upstream:
self.upstream_task_ids.add(task.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, task.node_id, self.upstream_join_id)
else:
self.downstream_task_ids.add(task.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, self.downstream_join_id, task.node_id)
def _set_relatives(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
upstream: bool = False,
edge_modifier: EdgeModifier | None = None,
) -> None:
"""
Call set_upstream/set_downstream for all root/leaf tasks within this TaskGroup.
Update upstream_group_ids/downstream_group_ids/upstream_task_ids/downstream_task_ids.
"""
if not isinstance(task_or_task_list, Sequence):
task_or_task_list = [task_or_task_list]
for task_like in task_or_task_list:
self.update_relative(task_like, upstream, edge_modifier=edge_modifier)
if upstream:
for task in self.get_roots():
task.set_upstream(task_or_task_list)
else:
for task in self.get_leaves():
task.set_downstream(task_or_task_list)
def __enter__(self) -> TaskGroup:
TaskGroupContext.push_context_managed_task_group(self)
return self
def __exit__(self, _type, _value, _tb):
TaskGroupContext.pop_context_managed_task_group()
def has_task(self, task: BaseOperator) -> bool:
"""Returns True if this TaskGroup or its children TaskGroups contains the given task."""
if task.task_id in self.children:
return True
return any(child.has_task(task) for child in self.children.values() if isinstance(child, TaskGroup))
@property
def roots(self) -> list[BaseOperator]:
"""Required by TaskMixin."""
return list(self.get_roots())
@property
def leaves(self) -> list[BaseOperator]:
"""Required by TaskMixin."""
return list(self.get_leaves())
def get_roots(self) -> Generator[BaseOperator, None, None]:
"""Return a generator of tasks with no upstream dependencies within the TaskGroup."""
tasks = list(self)
ids = {x.task_id for x in tasks}
for task in tasks:
if task.upstream_task_ids.isdisjoint(ids):
yield task
def get_leaves(self) -> Generator[BaseOperator, None, None]:
"""Return a generator of tasks with no downstream dependencies within the TaskGroup."""
tasks = list(self)
ids = {x.task_id for x in tasks}
def recurse_for_first_non_setup_teardown(task):
for upstream_task in task.upstream_list:
if upstream_task.task_id not in ids:
continue
if upstream_task.is_setup or upstream_task.is_teardown:
yield from recurse_for_first_non_setup_teardown(upstream_task)
else:
yield upstream_task
for task in tasks:
if task.downstream_task_ids.isdisjoint(ids):
if not (task.is_teardown or task.is_setup):
yield task
else:
yield from recurse_for_first_non_setup_teardown(task)
def child_id(self, label):
"""Prefix label with group_id if prefix_group_id is True. Otherwise return the label as-is."""
if self.prefix_group_id:
group_id = self.group_id
if group_id:
return f"{group_id}.{label}"
return label
@property
def upstream_join_id(self) -> str:
"""
Creates a unique ID for upstream dependencies of this TaskGroup.
If this TaskGroup has immediate upstream TaskGroups or tasks, a proxy node called
upstream_join_id will be created in Graph view to join the outgoing edges from this
TaskGroup to reduce the total number of edges needed to be displayed.
"""
return f"{self.group_id}.upstream_join_id"
@property
def downstream_join_id(self) -> str:
"""
Creates a unique ID for downstream dependencies of this TaskGroup.
If this TaskGroup has immediate downstream TaskGroups or tasks, a proxy node called
downstream_join_id will be created in Graph view to join the outgoing edges from this
TaskGroup to reduce the total number of edges needed to be displayed.
"""
return f"{self.group_id}.downstream_join_id"
def get_task_group_dict(self) -> dict[str, TaskGroup]:
"""Returns a flat dictionary of group_id: TaskGroup."""
task_group_map = {}
def build_map(task_group):
if not isinstance(task_group, TaskGroup):
return
task_group_map[task_group.group_id] = task_group
for child in task_group.children.values():
build_map(child)
build_map(self)
return task_group_map
def get_child_by_label(self, label: str) -> DAGNode:
"""Get a child task/TaskGroup by its label (i.e. task_id/group_id without the group_id prefix)."""
return self.children[self.child_id(label)]
def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
"""Required by DAGNode."""
from airflow.serialization.serialized_objects import TaskGroupSerialization
return DagAttributeTypes.TASK_GROUP, TaskGroupSerialization.serialize_task_group(self)
def hierarchical_alphabetical_sort(self):
"""
Sorts children in hierarchical alphabetical order.
- groups in alphabetical order first
- tasks in alphabetical order after them.
:return: list of tasks in hierarchical alphabetical order
"""
return sorted(
self.children.values(), key=lambda node: (not isinstance(node, TaskGroup), node.node_id)
)
def topological_sort(self, _include_subdag_tasks: bool = False):
"""
Sorts children in topological order, such that a task comes after any of its upstream dependencies.
:return: list of tasks in topological order
"""
# This uses a modified version of Kahn's Topological Sort algorithm to
# not have to pre-compute the "in-degree" of the nodes.
from airflow.operators.subdag import SubDagOperator # Avoid circular import
graph_unsorted = copy.copy(self.children)
graph_sorted: list[DAGNode] = []
# special case
if len(self.children) == 0:
return graph_sorted
# Run until the unsorted graph is empty.
while graph_unsorted:
# Go through each of the node/edges pairs in the unsorted graph. If a set of edges doesn't contain
# any nodes that haven't been resolved, that is, that are still in the unsorted graph, remove the
# pair from the unsorted graph, and append it to the sorted graph. Note here that by using
# the values() method for iterating, a copy of the unsorted graph is used, allowing us to modify
# the unsorted graph as we move through it.
#
# We also keep a flag for checking that graph is acyclic, which is true if any nodes are resolved
# during each pass through the graph. If not, we need to exit as the graph therefore can't be
# sorted.
acyclic = False
for node in list(graph_unsorted.values()):
for edge in node.upstream_list:
if edge.node_id in graph_unsorted:
break
# Check for task's group is a child (or grand child) of this TG,
tg = edge.task_group
while tg:
if tg.node_id in graph_unsorted:
break
tg = tg.task_group
if tg:
# We are already going to visit that TG
break
else:
acyclic = True
del graph_unsorted[node.node_id]
graph_sorted.append(node)
if _include_subdag_tasks and isinstance(node, SubDagOperator):
graph_sorted.extend(
node.subdag.task_group.topological_sort(_include_subdag_tasks=True)
)
if not acyclic:
raise AirflowDagCycleException(f"A cyclic dependency occurred in dag: {self.dag_id}")
return graph_sorted
def iter_mapped_task_groups(self) -> Iterator[MappedTaskGroup]:
"""Return mapped task groups in the hierarchy.
Groups are returned from the closest to the outermost. If *self* is a
mapped task group, it is returned first.
:meta private:
"""
group: TaskGroup | None = self
while group is not None:
if isinstance(group, MappedTaskGroup):
yield group
group = group.task_group
def iter_tasks(self) -> Iterator[AbstractOperator]:
"""Returns an iterator of the child tasks."""
from airflow.models.abstractoperator import AbstractOperator
groups_to_visit = [self]
while groups_to_visit:
visiting = groups_to_visit.pop(0)
for child in visiting.children.values():
if isinstance(child, AbstractOperator):
yield child
elif isinstance(child, TaskGroup):
groups_to_visit.append(child)
else:
raise ValueError(
f"Encountered a DAGNode that is not a TaskGroup or an AbstractOperator: {type(child)}"
)
def add_task(self, task: AbstractOperator) -> None:
"""Add a task to the task group.
:param task: the task to add
"""
if not TaskGroupContext.active:
raise AirflowException(
"Using this method on a task group that's not a context manager is not supported."
)
task.add_to_taskgroup(self)
def add_to_taskgroup(self, task_group: TaskGroup) -> None:
"""No-op, since we're not a task.
We only add tasks to TaskGroups and not TaskGroup, but we need
this to satisfy the interface.
:meta private:
"""
class MappedTaskGroup(TaskGroup):
"""A mapped task group.
This doesn't really do anything special, just holds some additional metadata
for expansion later.
Don't instantiate this class directly; call *expand* or *expand_kwargs* on
a ``@task_group`` function instead.
"""
def __init__(self, *, expand_input: ExpandInput, **kwargs: Any) -> None:
super().__init__(**kwargs)
self._expand_input = expand_input
for op, _ in expand_input.iter_references():
self.set_upstream(op)
def iter_mapped_dependencies(self) -> Iterator[Operator]:
"""Upstream dependencies that provide XComs used by this mapped task group."""
from airflow.models.xcom_arg import XComArg
for op, _ in XComArg.iter_xcom_references(self._expand_input):
yield op
@cache
def get_parse_time_mapped_ti_count(self) -> int:
"""Number of instances a task in this group should be mapped to, when a DAG run is created.
This only considers literal mapped arguments and raises *NotFullyPopulated*
when any non-literal values are used for mapping.
If this group is inside mapped task groups, all the nested counts are
multiplied and accounted.
:meta private:
:raise NotFullyPopulated: If any non-literal mapped arguments are encountered.
:return: The total number of mapped instances each task should have.
"""
return functools.reduce(
operator.mul,
(g._expand_input.get_parse_time_mapped_ti_count() for g in self.iter_mapped_task_groups()),
)
def get_mapped_ti_count(self, run_id: str, *, session: Session) -> int:
"""Number of instances a task in this group should be mapped to at run time.
This considers both literal and non-literal mapped arguments, and the
result is therefore available when all depended tasks have finished. The
return value should be identical to ``parse_time_mapped_ti_count`` if
all mapped arguments are literal.
If this group is inside mapped task groups, all the nested counts are
multiplied and accounted.
:meta private:
:raise NotFullyPopulated: If upstream tasks are not all complete yet.
:return: Total number of mapped TIs this task should have.
"""
groups = self.iter_mapped_task_groups()
return functools.reduce(
operator.mul,
(g._expand_input.get_total_map_length(run_id, session=session) for g in groups),
)
class TaskGroupContext:
"""TaskGroup context is used to keep the current TaskGroup when TaskGroup is used as ContextManager."""
active: bool = False
_context_managed_task_group: TaskGroup | None = None
_previous_context_managed_task_groups: list[TaskGroup] = []
@classmethod
def push_context_managed_task_group(cls, task_group: TaskGroup):
"""Push a TaskGroup into the list of managed TaskGroups."""
if cls._context_managed_task_group:
cls._previous_context_managed_task_groups.append(cls._context_managed_task_group)
cls._context_managed_task_group = task_group
cls.active = True
@classmethod
def pop_context_managed_task_group(cls) -> TaskGroup | None:
"""Pops the last TaskGroup from the list of manged TaskGroups and update the current TaskGroup."""
old_task_group = cls._context_managed_task_group
if cls._previous_context_managed_task_groups:
cls._context_managed_task_group = cls._previous_context_managed_task_groups.pop()
else:
cls._context_managed_task_group = None
cls.active = False
return old_task_group
@classmethod
def get_current_task_group(cls, dag: DAG | None) -> TaskGroup | None:
"""Get the current TaskGroup."""
from airflow.models.dag import DagContext
if not cls._context_managed_task_group:
dag = dag or DagContext.get_current_dag()
if dag:
# If there's currently a DAG but no TaskGroup, return the root TaskGroup of the dag.
return dag.task_group
return cls._context_managed_task_group
def task_group_to_dict(task_item_or_group):
"""Create a nested dict representation of this TaskGroup and its children used to construct the Graph."""
from airflow.models.abstractoperator import AbstractOperator
if isinstance(task := task_item_or_group, AbstractOperator):
setup_teardown_type = {}
if task.is_setup is True:
setup_teardown_type["setupTeardownType"] = "setup"
elif task.is_teardown is True:
setup_teardown_type["setupTeardownType"] = "teardown"
return {
"id": task.task_id,
"value": {
"label": task.label,
"labelStyle": f"fill:{task.ui_fgcolor};",
"style": f"fill:{task.ui_color};",
"rx": 5,
"ry": 5,
**setup_teardown_type,
},
}
task_group = task_item_or_group
is_mapped = isinstance(task_group, MappedTaskGroup)
children = [
task_group_to_dict(child) for child in sorted(task_group.children.values(), key=lambda t: t.label)
]
if task_group.upstream_group_ids or task_group.upstream_task_ids:
children.append(
{
"id": task_group.upstream_join_id,
"value": {
"label": "",
"labelStyle": f"fill:{task_group.ui_fgcolor};",
"style": f"fill:{task_group.ui_color};",
"shape": "circle",
},
}
)
if task_group.downstream_group_ids or task_group.downstream_task_ids:
# This is the join node used to reduce the number of edges between two TaskGroups.
children.append(
{
"id": task_group.downstream_join_id,
"value": {
"label": "",
"labelStyle": f"fill:{task_group.ui_fgcolor};",
"style": f"fill:{task_group.ui_color};",
"shape": "circle",
},
}
)
return {
"id": task_group.group_id,
"value": {
"label": task_group.label,
"labelStyle": f"fill:{task_group.ui_fgcolor};",
"style": f"fill:{task_group.ui_color}",
"rx": 5,
"ry": 5,
"clusterLabelPos": "top",
"tooltip": task_group.tooltip,
"isMapped": is_mapped,
},
"children": children,
}
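# Illustrative usage sketch added for clarity; not part of the original module.
# Inside a DAG, a TaskGroup used as a context manager prefixes the ids of operators
# created within it (EmptyOperator is used here only as a convenient no-op task).
def _example_task_group() -> None:
    import pendulum

    from airflow.models.dag import DAG
    from airflow.operators.empty import EmptyOperator

    with DAG(dag_id="example_task_group", schedule=None, start_date=pendulum.datetime(2023, 1, 1)):
        with TaskGroup(group_id="section_1"):
            start = EmptyOperator(task_id="start")
            end = EmptyOperator(task_id="end")
            start >> end
    assert start.task_id == "section_1.start"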
| 29,657 | 38.809396 | 110 |
py
|
airflow
|
airflow-main/airflow/utils/context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Jinja2 template rendering context helper."""
from __future__ import annotations
import contextlib
import copy
import functools
import warnings
from typing import (
TYPE_CHECKING,
Any,
Container,
ItemsView,
Iterator,
KeysView,
Mapping,
MutableMapping,
SupportsIndex,
ValuesView,
)
import lazy_object_proxy
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.utils.types import NOTSET
if TYPE_CHECKING:
from airflow.models.baseoperator import BaseOperator
# NOTE: Please keep this in sync with Context in airflow/utils/context.pyi.
KNOWN_CONTEXT_KEYS = {
"conf",
"conn",
"dag",
"dag_run",
"data_interval_end",
"data_interval_start",
"ds",
"ds_nodash",
"execution_date",
"expanded_ti_count",
"exception",
"inlets",
"logical_date",
"macros",
"next_ds",
"next_ds_nodash",
"next_execution_date",
"outlets",
"params",
"prev_data_interval_start_success",
"prev_data_interval_end_success",
"prev_ds",
"prev_ds_nodash",
"prev_execution_date",
"prev_execution_date_success",
"prev_start_date_success",
"run_id",
"task",
"task_instance",
"task_instance_key_str",
"test_mode",
"templates_dict",
"ti",
"tomorrow_ds",
"tomorrow_ds_nodash",
"triggering_dataset_events",
"ts",
"ts_nodash",
"ts_nodash_with_tz",
"try_number",
"var",
"yesterday_ds",
"yesterday_ds_nodash",
}
class VariableAccessor:
"""Wrapper to access Variable values in template."""
def __init__(self, *, deserialize_json: bool) -> None:
self._deserialize_json = deserialize_json
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.variable import Variable
self.var = Variable.get(key, deserialize_json=self._deserialize_json)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key, default: Any = NOTSET) -> Any:
from airflow.models.variable import Variable
if default is NOTSET:
return Variable.get(key, deserialize_json=self._deserialize_json)
return Variable.get(key, default, deserialize_json=self._deserialize_json)
class ConnectionAccessor:
"""Wrapper to access Connection entries in template."""
def __init__(self) -> None:
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.connection import Connection
self.var = Connection.get_connection_from_secrets(key)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key: str, default_conn: Any = None) -> Any:
from airflow.exceptions import AirflowNotFoundException
from airflow.models.connection import Connection
try:
return Connection.get_connection_from_secrets(key)
except AirflowNotFoundException:
return default_conn
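# Illustrative template usage (key and connection id are placeholders): these accessors back
# the ``var`` and ``conn`` entries of the task context, e.g. in a templated operator field:
#
#     "{{ var.value.my_key }}"        # VariableAccessor(deserialize_json=False)
#     "{{ var.json.my_key }}"         # VariableAccessor(deserialize_json=True)
#     "{{ conn.my_conn_id.host }}"    # ConnectionAccessor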
class AirflowContextDeprecationWarning(RemovedInAirflow3Warning):
"""Warn for usage of deprecated context variables in a task."""
def _create_deprecation_warning(key: str, replacements: list[str]) -> RemovedInAirflow3Warning:
message = f"Accessing {key!r} from the template is deprecated and will be removed in a future version."
if not replacements:
return AirflowContextDeprecationWarning(message)
display_except_last = ", ".join(repr(r) for r in replacements[:-1])
if display_except_last:
message += f" Please use {display_except_last} or {replacements[-1]!r} instead."
else:
message += f" Please use {replacements[-1]!r} instead."
return AirflowContextDeprecationWarning(message)
class Context(MutableMapping[str, Any]):
"""Jinja2 template context for task rendering.
This is a mapping (dict-like) class that can lazily emit warnings when
(and only when) deprecated context keys are accessed.
"""
_DEPRECATION_REPLACEMENTS: dict[str, list[str]] = {
"execution_date": ["data_interval_start", "logical_date"],
"next_ds": ["{{ data_interval_end | ds }}"],
"next_ds_nodash": ["{{ data_interval_end | ds_nodash }}"],
"next_execution_date": ["data_interval_end"],
"prev_ds": [],
"prev_ds_nodash": [],
"prev_execution_date": [],
"prev_execution_date_success": ["prev_data_interval_start_success"],
"tomorrow_ds": [],
"tomorrow_ds_nodash": [],
"yesterday_ds": [],
"yesterday_ds_nodash": [],
}
def __init__(self, context: MutableMapping[str, Any] | None = None, **kwargs: Any) -> None:
self._context: MutableMapping[str, Any] = context or {}
if kwargs:
self._context.update(kwargs)
self._deprecation_replacements = self._DEPRECATION_REPLACEMENTS.copy()
def __repr__(self) -> str:
return repr(self._context)
def __reduce_ex__(self, protocol: SupportsIndex) -> tuple[Any, ...]:
"""Pickle the context as a dict.
We are intentionally going through ``__getitem__`` in this function,
instead of using ``items()``, to trigger deprecation warnings.
"""
items = [(key, self[key]) for key in self._context]
return dict, (items,)
def __copy__(self) -> Context:
new = type(self)(copy.copy(self._context))
new._deprecation_replacements = self._deprecation_replacements.copy()
return new
def __getitem__(self, key: str) -> Any:
with contextlib.suppress(KeyError):
warnings.warn(_create_deprecation_warning(key, self._deprecation_replacements[key]))
with contextlib.suppress(KeyError):
return self._context[key]
raise KeyError(key)
def __setitem__(self, key: str, value: Any) -> None:
self._deprecation_replacements.pop(key, None)
self._context[key] = value
def __delitem__(self, key: str) -> None:
self._deprecation_replacements.pop(key, None)
del self._context[key]
def __contains__(self, key: object) -> bool:
return key in self._context
def __iter__(self) -> Iterator[str]:
return iter(self._context)
def __len__(self) -> int:
return len(self._context)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context == other._context
def __ne__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context != other._context
def keys(self) -> KeysView[str]:
return self._context.keys()
def items(self):
return ItemsView(self._context)
def values(self):
return ValuesView(self._context)
def context_merge(context: Context, *args: Any, **kwargs: Any) -> None:
"""Merge parameters into an existing context.
    Like ``dict.update()``, this takes the same parameters and updates
    ``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
context.update(*args, **kwargs)
def context_update_for_unmapped(context: Context, task: BaseOperator) -> None:
"""Update context after task unmapping.
Since ``get_template_context()`` is called before unmapping, the context
contains information about the mapped task. We need to do some in-place
updates to ensure the template context reflects the unmapped task instead.
:meta private:
"""
from airflow.models.param import process_params
context["task"] = context["ti"].task = task
context["params"] = process_params(context["dag"], task, context["dag_run"], suppress_exception=False)
def context_copy_partial(source: Context, keys: Container[str]) -> Context:
"""Create a context by copying items under selected keys in ``source``.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
new = Context({k: v for k, v in source._context.items() if k in keys})
new._deprecation_replacements = source._deprecation_replacements.copy()
return new
def lazy_mapping_from_context(source: Context) -> Mapping[str, Any]:
"""Create a mapping that wraps deprecated entries in a lazy object proxy.
This further delays deprecation warning to until when the entry is actually
used, instead of when it's accessed in the context. The result is useful for
passing into a callable with ``**kwargs``, which would unpack the mapping
too eagerly otherwise.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
if not isinstance(source, Context):
        # Sometimes we are passed a plain dict (usually in tests, or in users'
        # custom operators) -- be lenient about what we accept so we don't
        # break anything for users.
return source
def _deprecated_proxy_factory(k: str, v: Any) -> Any:
replacements = source._deprecation_replacements[k]
warnings.warn(_create_deprecation_warning(k, replacements))
return v
def _create_value(k: str, v: Any) -> Any:
if k not in source._deprecation_replacements:
return v
factory = functools.partial(_deprecated_proxy_factory, k, v)
return lazy_object_proxy.Proxy(factory)
return {k: _create_value(k, v) for k, v in source._context.items()}
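# Illustrative sketch (keys and callable are placeholders): wrapping a Context this way delays
# the deprecation warning until a deprecated entry is actually used by the callable.
#
#     kwargs = lazy_mapping_from_context(context)
#     python_callable(**kwargs)   # warns only if the callable touches e.g. "execution_date"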
| 10,566 | 31.614198 | 107 |
py
|
airflow
|
airflow-main/airflow/utils/scheduler_health.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from airflow.configuration import conf
from airflow.jobs.job import Job
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
log = logging.getLogger(__name__)
class HealthServer(BaseHTTPRequestHandler):
"""Small webserver to serve scheduler health check."""
def do_GET(self):
if self.path == "/health":
try:
with create_session() as session:
scheduler_job = (
session.query(Job)
.filter_by(job_type=SchedulerJobRunner.job_type)
.filter_by(hostname=get_hostname())
.order_by(Job.latest_heartbeat.desc())
.first()
)
if scheduler_job and scheduler_job.is_alive():
self.send_response(200)
self.end_headers()
else:
self.send_error(503)
except Exception:
log.exception("Exception when executing Health check")
self.send_error(503)
else:
self.send_error(404)
def serve_health_check():
health_check_port = conf.getint("scheduler", "SCHEDULER_HEALTH_CHECK_SERVER_PORT")
httpd = HTTPServer(("0.0.0.0", health_check_port), HealthServer)
httpd.serve_forever()
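# Illustrative check (assumption: the configured health check port, commonly 8974, is in use):
#
#     curl http://localhost:8974/health   # 200 if a live local scheduler heartbeat is found,
#                                         # 503 otherwise, 404 for any other path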
if __name__ == "__main__":
serve_health_check()
| 2,380 | 35.630769 | 86 |
py
|
airflow
|
airflow-main/airflow/utils/timezone.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime as dt
from typing import overload
import pendulum
from dateutil.relativedelta import relativedelta
from pendulum.datetime import DateTime
# UTC time zone as a tzinfo instance.
utc = pendulum.tz.timezone("UTC")
def is_localized(value):
"""Determine if a given datetime.datetime is aware.
The concept is defined in Python documentation. Assuming the tzinfo is
either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()``
implements the appropriate logic.
.. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.utcoffset() is not None
def is_naive(value):
"""Determine if a given datetime.datetime is naive.
The concept is defined in Python documentation. Assuming the tzinfo is
either None or a proper ``datetime.tzinfo`` instance, ``value.utcoffset()``
implements the appropriate logic.
.. seealso:: http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.utcoffset() is None
def utcnow() -> dt.datetime:
"""Get the current date and time in UTC."""
# pendulum utcnow() is not used as that sets a TimezoneInfo object
# instead of a Timezone. This is not picklable and also creates issues
# when using replace()
result = dt.datetime.utcnow()
result = result.replace(tzinfo=utc)
return result
def utc_epoch() -> dt.datetime:
"""Gets the epoch in the users timezone."""
# pendulum utcnow() is not used as that sets a TimezoneInfo object
# instead of a Timezone. This is not picklable and also creates issues
# when using replace()
result = dt.datetime(1970, 1, 1)
result = result.replace(tzinfo=utc)
return result
@overload
def convert_to_utc(value: None) -> None:
...
@overload
def convert_to_utc(value: dt.datetime) -> DateTime:
...
def convert_to_utc(value: dt.datetime | None) -> DateTime | None:
"""Creates a datetime with the default timezone added if none is associated.
:param value: datetime
:return: datetime with tzinfo
"""
if value is None:
return value
if not is_localized(value):
from airflow.settings import TIMEZONE
value = pendulum.instance(value, TIMEZONE)
return pendulum.instance(value.astimezone(utc))
@overload
def make_aware(value: None, timezone: dt.tzinfo | None = None) -> None:
...
@overload
def make_aware(value: DateTime, timezone: dt.tzinfo | None = None) -> DateTime:
...
@overload
def make_aware(value: dt.datetime, timezone: dt.tzinfo | None = None) -> dt.datetime:
...
def make_aware(value: dt.datetime | None, timezone: dt.tzinfo | None = None) -> dt.datetime | None:
"""
Make a naive datetime.datetime in a given time zone aware.
:param value: datetime
:param timezone: timezone
:return: localized datetime in settings.TIMEZONE or timezone
"""
if timezone is None:
from airflow.settings import TIMEZONE
timezone = TIMEZONE
if not value:
return None
# Check that we won't overwrite the timezone of an aware datetime.
if is_localized(value):
raise ValueError(f"make_aware expects a naive datetime, got {value}")
if hasattr(value, "fold"):
        # In case of python 3.6 we want to do the same as pendulum does for python 3.5,
        # i.e. in case we move the clock back we want to schedule the run at the time of the
        # second instance of the same clock time rather than the first one.
        # The fold parameter has no impact in other cases, so we can safely set it to 1 here.
value = value.replace(fold=1)
localized = getattr(timezone, "localize", None)
if localized is not None:
# This method is available for pytz time zones
return localized(value)
convert = getattr(timezone, "convert", None)
if convert is not None:
# For pendulum
return convert(value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""
Make an aware datetime.datetime naive in a given time zone.
:param value: datetime
:param timezone: timezone
:return: naive datetime
"""
if timezone is None:
from airflow.settings import TIMEZONE
timezone = TIMEZONE
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
date = value.astimezone(timezone)
# cross library compatibility
naive = dt.datetime(
date.year, date.month, date.day, date.hour, date.minute, date.second, date.microsecond
)
return naive
def datetime(*args, **kwargs):
"""
Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified.
:return: datetime.datetime
"""
if "tzinfo" not in kwargs:
from airflow.settings import TIMEZONE
kwargs["tzinfo"] = TIMEZONE
return dt.datetime(*args, **kwargs)
def parse(string: str, timezone=None) -> DateTime:
"""
Parse a time string and return an aware datetime.
:param string: time string
:param timezone: the timezone
"""
from airflow.settings import TIMEZONE
return pendulum.parse(string, tz=timezone or TIMEZONE, strict=False) # type: ignore
@overload
def coerce_datetime(v: None, tz: dt.tzinfo | None = None) -> None:
...
@overload
def coerce_datetime(v: DateTime, tz: dt.tzinfo | None = None) -> DateTime:
...
@overload
def coerce_datetime(v: dt.datetime, tz: dt.tzinfo | None = None) -> DateTime:
...
def coerce_datetime(v: dt.datetime | None, tz: dt.tzinfo | None = None) -> DateTime | None:
"""Convert ``v`` into a timezone-aware ``pendulum.DateTime``.
* If ``v`` is *None*, *None* is returned.
* If ``v`` is a naive datetime, it is converted to an aware Pendulum DateTime.
* If ``v`` is an aware datetime, it is converted to a Pendulum DateTime.
Note that ``tz`` is **not** taken into account in this case; the datetime
will maintain its original tzinfo!
"""
if v is None:
return None
if isinstance(v, DateTime):
return v if v.tzinfo else make_aware(v, tz)
# Only dt.datetime is left here.
return pendulum.instance(v if v.tzinfo else make_aware(v, tz))
def td_format(td_object: None | dt.timedelta | float | int) -> str | None:
"""
Format a timedelta object or float/int into a readable string for time duration.
For example timedelta(seconds=3752) would become `1h:2M:32s`.
If the time is less than a second, the return will be `<1s`.
"""
if not td_object:
return None
if isinstance(td_object, dt.timedelta):
delta = relativedelta() + td_object
else:
delta = relativedelta(seconds=int(td_object))
# relativedelta for timedelta cannot convert days to months
# so calculate months by assuming 30 day months and normalize
months, delta.days = divmod(delta.days, 30)
delta = delta.normalized() + relativedelta(months=months)
def _format_part(key: str) -> str:
value = int(getattr(delta, key))
if value < 1:
return ""
# distinguish between month/minute following strftime format
# and take first char of each unit, i.e. years='y', days='d'
if key == "minutes":
key = key.upper()
key = key[0]
return f"{value}{key}"
parts = map(_format_part, ("years", "months", "days", "hours", "minutes", "seconds"))
joined = ":".join(part for part in parts if part)
if not joined:
return "<1s"
return joined
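# Illustrative examples (not part of the original module):
#
#     td_format(dt.timedelta(seconds=3752))   # "1h:2M:32s"
#     td_format(0.5)                          # "<1s"
#     td_format(None)                         # None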
| 8,499 | 29.909091 | 99 |
py
|
airflow
|
airflow-main/airflow/utils/compression.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import bz2
import gzip
import shutil
from tempfile import NamedTemporaryFile
def uncompress_file(input_file_name, file_extension, dest_dir):
"""Uncompress gz and bz2 files."""
if file_extension.lower() not in (".gz", ".bz2"):
raise NotImplementedError(
f"Received {file_extension} format. Only gz and bz2 files can currently be uncompressed."
)
if file_extension.lower() == ".gz":
fmodule = gzip.GzipFile
elif file_extension.lower() == ".bz2":
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode="rb") as f_compressed, NamedTemporaryFile(
dir=dest_dir, mode="wb", delete=False
) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name
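# Illustrative usage (paths are placeholders, not part of the original module):
#
#     tmp_path = uncompress_file("/tmp/data.csv.gz", ".gz", "/tmp")
#     # tmp_path names an uncompressed NamedTemporaryFile in /tmp; since delete=False,
#     # the caller is responsible for cleaning it up.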
| 1,592 | 37.853659 | 101 |
py
|
airflow
|
airflow-main/airflow/utils/operator_resources.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.configuration import conf
from airflow.exceptions import AirflowException
# Constants for resources (megabytes are the base unit)
MB = 1
GB = 1024 * MB
TB = 1024 * GB
PB = 1024 * TB
EB = 1024 * PB
class Resource:
"""
Represents a resource requirement in an execution environment for an operator.
:param name: Name of the resource
:param units_str: The string representing the units of a resource (e.g. MB for a CPU
resource) to be used for display purposes
:param qty: The number of units of the specified resource that are required for
execution of the operator.
"""
def __init__(self, name, units_str, qty):
if qty < 0:
raise AirflowException(
f"Received resource quantity {qty} for resource {name}, "
f"but resource quantity must be non-negative."
)
self._name = name
self._units_str = units_str
self._qty = qty
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.__dict__)
@property
def name(self):
"""Name of the resource."""
return self._name
@property
def units_str(self):
"""The string representing the units of a resource."""
return self._units_str
@property
def qty(self):
"""The number of units of the specified resource that are required for execution of the operator."""
return self._qty
def to_dict(self):
return {
"name": self.name,
"qty": self.qty,
"units_str": self.units_str,
}
class CpuResource(Resource):
"""Represents a CPU requirement in an execution environment for an operator."""
def __init__(self, qty):
super().__init__("CPU", "core(s)", qty)
class RamResource(Resource):
"""Represents a RAM requirement in an execution environment for an operator."""
def __init__(self, qty):
super().__init__("RAM", "MB", qty)
class DiskResource(Resource):
"""Represents a disk requirement in an execution environment for an operator."""
def __init__(self, qty):
super().__init__("Disk", "MB", qty)
class GpuResource(Resource):
"""Represents a GPU requirement in an execution environment for an operator."""
def __init__(self, qty):
super().__init__("GPU", "gpu(s)", qty)
class Resources:
"""
The resources required by an operator.
Resources that are not specified will use the default values from the airflow config.
:param cpus: The number of cpu cores that are required
:param ram: The amount of RAM required
:param disk: The amount of disk space required
:param gpus: The number of gpu units that are required
"""
def __init__(
self,
cpus=conf.getint("operators", "default_cpus"),
ram=conf.getint("operators", "default_ram"),
disk=conf.getint("operators", "default_disk"),
gpus=conf.getint("operators", "default_gpus"),
):
self.cpus = CpuResource(cpus)
self.ram = RamResource(ram)
self.disk = DiskResource(disk)
self.gpus = GpuResource(gpus)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.__dict__)
def to_dict(self):
return {
"cpus": self.cpus.to_dict(),
"ram": self.ram.to_dict(),
"disk": self.disk.to_dict(),
"gpus": self.gpus.to_dict(),
}
@classmethod
def from_dict(cls, resources_dict: dict):
"""Create resources from resources dict."""
cpus = resources_dict["cpus"]["qty"]
ram = resources_dict["ram"]["qty"]
disk = resources_dict["disk"]["qty"]
gpus = resources_dict["gpus"]["qty"]
return cls(cpus=cpus, ram=ram, disk=disk, gpus=gpus)
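# Illustrative usage (quantities are placeholders, not part of the original module):
#
#     resources = Resources(cpus=2, ram=2048)                # unset fields use config defaults
#     round_trip = Resources.from_dict(resources.to_dict())
#     assert round_trip == resources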
| 4,904 | 29.465839 | 108 |
py
|
airflow
|
airflow-main/airflow/utils/dag_parsing_context.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
from contextlib import contextmanager
from typing import NamedTuple
class AirflowParsingContext(NamedTuple):
"""
Context of parsing for the DAG.
If these values are not None, they will contain the specific DAG and Task ID that Airflow is requesting to
execute. You can use these for optimizing dynamically generated DAG files.
"""
dag_id: str | None
task_id: str | None
_AIRFLOW_PARSING_CONTEXT_DAG_ID = "_AIRFLOW_PARSING_CONTEXT_DAG_ID"
_AIRFLOW_PARSING_CONTEXT_TASK_ID = "_AIRFLOW_PARSING_CONTEXT_TASK_ID"
@contextmanager
def _airflow_parsing_context_manager(dag_id: str | None = None, task_id: str | None = None):
old_dag_id = os.environ.get(_AIRFLOW_PARSING_CONTEXT_DAG_ID)
old_task_id = os.environ.get(_AIRFLOW_PARSING_CONTEXT_TASK_ID)
if dag_id is not None:
os.environ[_AIRFLOW_PARSING_CONTEXT_DAG_ID] = dag_id
if task_id is not None:
os.environ[_AIRFLOW_PARSING_CONTEXT_TASK_ID] = task_id
yield
if old_task_id is not None:
os.environ[_AIRFLOW_PARSING_CONTEXT_TASK_ID] = old_task_id
if old_dag_id is not None:
os.environ[_AIRFLOW_PARSING_CONTEXT_DAG_ID] = old_dag_id
def get_parsing_context() -> AirflowParsingContext:
"""Return the current (DAG) parsing context info."""
return AirflowParsingContext(
dag_id=os.environ.get(_AIRFLOW_PARSING_CONTEXT_DAG_ID),
task_id=os.environ.get(_AIRFLOW_PARSING_CONTEXT_TASK_ID),
)
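# Illustrative sketch (DAG names and the ``build_dag`` factory are placeholders, not part of
# this module): a dynamically generated DAG file can use the parsing context to skip building
# DAGs that the current parse or execution does not need.
#
#     current = get_parsing_context()
#     for name in all_customer_names:                        # hypothetical list of names
#         dag_id = f"customer_{name}"
#         if current.dag_id is not None and current.dag_id != dag_id:
#             continue                                        # not the DAG being executed
#         globals()[dag_id] = build_dag(dag_id)               # hypothetical DAG factory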
| 2,281 | 36.409836 | 110 |
py
|
airflow
|
airflow-main/airflow/utils/retries.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
import logging
from inspect import signature
from typing import Callable, TypeVar, overload
from sqlalchemy.exc import DBAPIError, OperationalError
from airflow.configuration import conf
F = TypeVar("F", bound=Callable)
MAX_DB_RETRIES = conf.getint("database", "max_db_retries", fallback=3)
def run_with_db_retries(max_retries: int = MAX_DB_RETRIES, logger: logging.Logger | None = None, **kwargs):
"""Return Tenacity Retrying object with project specific default."""
import tenacity
# Default kwargs
retry_kwargs = dict(
retry=tenacity.retry_if_exception_type(exception_types=(OperationalError, DBAPIError)),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(max_retries),
reraise=True,
**kwargs,
)
if logger and isinstance(logger, logging.Logger):
retry_kwargs["before_sleep"] = tenacity.before_sleep_log(logger, logging.DEBUG, True)
return tenacity.Retrying(**retry_kwargs)
@overload
def retry_db_transaction(*, retries: int = MAX_DB_RETRIES) -> Callable[[F], F]:
...
@overload
def retry_db_transaction(_func: F) -> F:
...
def retry_db_transaction(_func: Callable | None = None, *, retries: int = MAX_DB_RETRIES, **retry_kwargs):
"""Decorator to retry functions in case of ``OperationalError`` from DB.
It should not be used with ``@provide_session``.
"""
def retry_decorator(func: Callable) -> Callable:
# Get Positional argument for 'session'
func_params = signature(func).parameters
try:
# func_params is an ordered dict -- this is the "recommended" way of getting the position
session_args_idx = tuple(func_params).index("session")
except ValueError:
raise ValueError(f"Function {func.__qualname__} has no `session` argument")
# We don't need this anymore -- ensure we don't keep a reference to it by mistake
del func_params
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
logger = args[0].log if args and hasattr(args[0], "log") else logging.getLogger(func.__module__)
# Get session from args or kwargs
if "session" in kwargs:
session = kwargs["session"]
elif len(args) > session_args_idx:
session = args[session_args_idx]
else:
raise TypeError(f"session is a required argument for {func.__qualname__}")
for attempt in run_with_db_retries(max_retries=retries, logger=logger, **retry_kwargs):
with attempt:
logger.debug(
"Running %s with retries. Try %d of %d",
func.__qualname__,
attempt.retry_state.attempt_number,
retries,
)
try:
return func(*args, **kwargs)
except OperationalError:
session.rollback()
raise
return wrapped_function
# Allow using decorator with and without arguments
if _func is None:
return retry_decorator
else:
return retry_decorator(_func)
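# Illustrative usage (method name and statement are placeholders): the decorated callable must
# take a ``session`` argument so the decorator can roll it back between attempts.
#
#     @retry_db_transaction
#     def _update_counters(self, session):
#         session.execute(some_statement)   # hypothetical statement; retried on OperationalError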
| 4,109 | 36.027027 | 108 |
py
|
airflow
|
airflow-main/airflow/utils/singleton.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class Singleton(type, Generic[T]):
"""Metaclass that allows to implement singleton pattern."""
_instances: dict[Singleton[T], T] = {}
def __call__(cls: Singleton[T], *args, **kwargs) -> T:
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
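# Illustrative usage (class name is a placeholder, not part of the original module):
#
#     class Configuration(metaclass=Singleton):
#         def __init__(self):
#             self.loaded = True
#
#     assert Configuration() is Configuration()   # every call returns the same instance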
| 1,223 | 35 | 67 |
py
|
airflow
|
airflow-main/airflow/utils/module_loading.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import pkgutil
from importlib import import_module
from types import ModuleType
from typing import Callable
def import_string(dotted_path: str):
"""
Import a dotted module path and return the attribute/class designated by the last name in the path.
Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit(".", 1)
except ValueError:
raise ImportError(f"{dotted_path} doesn't look like a module path")
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute/class')
def qualname(o: object | Callable) -> str:
"""Convert an attribute/class/function to a string importable by ``import_string``."""
if callable(o) and hasattr(o, "__module__") and hasattr(o, "__name__"):
return f"{o.__module__}.{o.__name__}"
cls = o
if not isinstance(cls, type): # instance or class
cls = type(cls)
name = cls.__qualname__
module = cls.__module__
if module and module != "__builtin__":
return f"{module}.{name}"
return name
def iter_namespace(ns: ModuleType):
return pkgutil.iter_modules(ns.__path__, ns.__name__ + ".")
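# Illustrative round trip (not part of the original module):
#
#     fn = import_string("airflow.utils.module_loading.import_string")   # this very function
#     assert qualname(fn) == "airflow.utils.module_loading.import_string"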
| 2,132 | 31.318182 | 103 |
py
|
airflow
|
airflow-main/airflow/utils/state.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
class JobState(str, Enum):
"""All possible states that a Job can be in."""
RUNNING = "running"
SUCCESS = "success"
SHUTDOWN = "shutdown"
RESTARTING = "restarting"
FAILED = "failed"
def __str__(self) -> str:
return self.value
class TaskInstanceState(str, Enum):
"""All possible states that a Task Instance can be in.
Note that None is also allowed, so always use this in a type hint with Optional.
"""
# The scheduler sets a TaskInstance state to None when it's created but not
# yet run, but we don't list it here since TaskInstance is a string enum.
# Use None instead if need this state.
# Set by the scheduler
REMOVED = "removed" # Task vanished from DAG before it ran
SCHEDULED = "scheduled" # Task should run and will be handed to executor soon
# Set by the task instance itself
QUEUED = "queued" # Executor has enqueued the task
RUNNING = "running" # Task is executing
SUCCESS = "success" # Task completed
SHUTDOWN = "shutdown" # External request to shut down (e.g. marked failed when running)
RESTARTING = "restarting" # External request to restart (e.g. cleared when running)
FAILED = "failed" # Task errored out
UP_FOR_RETRY = "up_for_retry" # Task failed but has retries left
UP_FOR_RESCHEDULE = "up_for_reschedule" # A waiting `reschedule` sensor
UPSTREAM_FAILED = "upstream_failed" # One or more upstream deps failed
SKIPPED = "skipped" # Skipped by branching or some other mechanism
DEFERRED = "deferred" # Deferrable operator waiting on a trigger
def __str__(self) -> str:
return self.value
class DagRunState(str, Enum):
"""All possible states that a DagRun can be in.
These are "shared" with TaskInstanceState in some parts of the code,
so please ensure that their values always match the ones with the
same name in TaskInstanceState.
"""
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
FAILED = "failed"
def __str__(self) -> str:
return self.value
class State:
"""Static class with task instance state constants and color methods to avoid hardcoding."""
# Backwards-compat constants for code that does not yet use the enum
# These first three are shared by DagState and TaskState
SUCCESS = TaskInstanceState.SUCCESS
RUNNING = TaskInstanceState.RUNNING
FAILED = TaskInstanceState.FAILED
# These are TaskState only
NONE = None
REMOVED = TaskInstanceState.REMOVED
SCHEDULED = TaskInstanceState.SCHEDULED
QUEUED = TaskInstanceState.QUEUED
SHUTDOWN = TaskInstanceState.SHUTDOWN
RESTARTING = TaskInstanceState.RESTARTING
UP_FOR_RETRY = TaskInstanceState.UP_FOR_RETRY
UP_FOR_RESCHEDULE = TaskInstanceState.UP_FOR_RESCHEDULE
UPSTREAM_FAILED = TaskInstanceState.UPSTREAM_FAILED
SKIPPED = TaskInstanceState.SKIPPED
DEFERRED = TaskInstanceState.DEFERRED
finished_dr_states: frozenset[DagRunState] = frozenset([DagRunState.SUCCESS, DagRunState.FAILED])
unfinished_dr_states: frozenset[DagRunState] = frozenset([DagRunState.QUEUED, DagRunState.RUNNING])
task_states: tuple[TaskInstanceState | None, ...] = (None,) + tuple(TaskInstanceState)
dag_states: tuple[DagRunState, ...] = (
DagRunState.QUEUED,
DagRunState.SUCCESS,
DagRunState.RUNNING,
DagRunState.FAILED,
)
state_color: dict[TaskInstanceState | None, str] = {
None: "lightblue",
TaskInstanceState.QUEUED: "gray",
TaskInstanceState.RUNNING: "lime",
TaskInstanceState.SUCCESS: "green",
TaskInstanceState.SHUTDOWN: "blue",
TaskInstanceState.RESTARTING: "violet",
TaskInstanceState.FAILED: "red",
TaskInstanceState.UP_FOR_RETRY: "gold",
TaskInstanceState.UP_FOR_RESCHEDULE: "turquoise",
TaskInstanceState.UPSTREAM_FAILED: "orange",
TaskInstanceState.SKIPPED: "hotpink",
TaskInstanceState.REMOVED: "lightgrey",
TaskInstanceState.SCHEDULED: "tan",
TaskInstanceState.DEFERRED: "mediumpurple",
}
@classmethod
def color(cls, state):
"""Returns color for a state."""
return cls.state_color.get(state, "white")
@classmethod
def color_fg(cls, state):
"""Black&white colors for a state."""
color = cls.color(state)
if color in ["green", "red"]:
return "white"
return "black"
finished: frozenset[TaskInstanceState] = frozenset(
[
TaskInstanceState.SUCCESS,
TaskInstanceState.FAILED,
TaskInstanceState.SKIPPED,
TaskInstanceState.UPSTREAM_FAILED,
TaskInstanceState.REMOVED,
]
)
"""
A list of states indicating a task has reached a terminal state (i.e. it has "finished") and needs no
further action.
    Note that the attempt could have resulted in failure, have been
    interrupted, or perhaps never have run at all (skipped or upstream_failed);
    in any case, it is no longer running.
"""
unfinished: frozenset[TaskInstanceState | None] = frozenset(
[
None,
TaskInstanceState.SCHEDULED,
TaskInstanceState.QUEUED,
TaskInstanceState.RUNNING,
TaskInstanceState.SHUTDOWN,
TaskInstanceState.RESTARTING,
TaskInstanceState.UP_FOR_RETRY,
TaskInstanceState.UP_FOR_RESCHEDULE,
TaskInstanceState.DEFERRED,
]
)
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
failed_states: frozenset[TaskInstanceState] = frozenset(
[TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED]
)
"""
    A list of states indicating that a task or dag is in a failed state.
"""
success_states: frozenset[TaskInstanceState] = frozenset(
[TaskInstanceState.SUCCESS, TaskInstanceState.SKIPPED]
)
"""
    A list of states indicating that a task or dag is in a success state.
"""
terminating_states = frozenset([TaskInstanceState.SHUTDOWN, TaskInstanceState.RESTARTING])
"""
A list of states indicating that a task has been terminated.
"""
| 7,112 | 34.212871 | 105 |
py
|
airflow
|
airflow-main/airflow/utils/dag_edges.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.models import Operator
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.dag import DAG
def dag_edges(dag: DAG):
"""
Create the list of edges needed to construct the Graph view.
A special case is made if a TaskGroup is immediately upstream/downstream of another
TaskGroup or task. Two proxy nodes named upstream_join_id and downstream_join_id are
created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
all edges are directed onto the proxy nodes. This is to cut down the number of edges on
the graph.
For example: A DAG with TaskGroups group1 and group2:
group1: task1, task2, task3
group2: task4, task5, task6
group2 is downstream of group1:
group1 >> group2
Edges to add (This avoids having to create edges between every task in group1 and group2):
task1 >> downstream_join_id
task2 >> downstream_join_id
task3 >> downstream_join_id
downstream_join_id >> upstream_join_id
upstream_join_id >> task4
upstream_join_id >> task5
upstream_join_id >> task6
"""
# Edges to add between TaskGroup
edges_to_add = set()
# Edges to remove between individual tasks that are replaced by edges_to_add.
edges_to_skip = set()
task_group_map = dag.task_group.get_task_group_dict()
def collect_edges(task_group):
"""Update edges_to_add and edges_to_skip according to TaskGroups."""
if isinstance(task_group, AbstractOperator):
return
for target_id in task_group.downstream_group_ids:
# For every TaskGroup immediately downstream, add edges between downstream_join_id
# and upstream_join_id. Skip edges between individual tasks of the TaskGroups.
target_group = task_group_map[target_id]
edges_to_add.add((task_group.downstream_join_id, target_group.upstream_join_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
for target in target_group.get_roots():
edges_to_skip.add((child.task_id, target.task_id))
edges_to_skip.add((child.task_id, target_group.upstream_join_id))
for child in target_group.get_roots():
edges_to_add.add((target_group.upstream_join_id, child.task_id))
edges_to_skip.add((task_group.downstream_join_id, child.task_id))
# For every individual task immediately downstream, add edges between downstream_join_id and
# the downstream task. Skip edges between individual tasks of the TaskGroup and the
# downstream task.
for target_id in task_group.downstream_task_ids:
edges_to_add.add((task_group.downstream_join_id, target_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
edges_to_skip.add((child.task_id, target_id))
# For every individual task immediately upstream, add edges between the upstream task
# and upstream_join_id. Skip edges between the upstream task and individual tasks
# of the TaskGroup.
for source_id in task_group.upstream_task_ids:
edges_to_add.add((source_id, task_group.upstream_join_id))
for child in task_group.get_roots():
edges_to_add.add((task_group.upstream_join_id, child.task_id))
edges_to_skip.add((source_id, child.task_id))
for child in task_group.children.values():
collect_edges(child)
collect_edges(dag.task_group)
# Collect all the edges between individual tasks
edges = set()
setup_teardown_edges = set()
tasks_to_trace: list[Operator] = dag.roots
while tasks_to_trace:
tasks_to_trace_next: list[Operator] = []
for task in tasks_to_trace:
for child in task.downstream_list:
edge = (task.task_id, child.task_id)
if task.is_setup and child.is_teardown:
setup_teardown_edges.add(edge)
if edge in edges:
continue
edges.add(edge)
tasks_to_trace_next.append(child)
tasks_to_trace = tasks_to_trace_next
result = []
# Build result dicts with the two ends of the edge, plus any extra metadata
# if we have it.
for source_id, target_id in sorted(edges.union(edges_to_add) - edges_to_skip):
record = {"source_id": source_id, "target_id": target_id}
label = dag.get_edge_info(source_id, target_id).get("label")
if (source_id, target_id) in setup_teardown_edges:
record["is_setup_teardown"] = True
if label:
record["label"] = label
result.append(record)
return result
| 5,744 | 42.522727 | 100 |
py
|
airflow
|
airflow-main/airflow/utils/trigger_rule.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
class TriggerRule(str, Enum):
"""Class with task's trigger rules."""
ALL_SUCCESS = "all_success"
ALL_FAILED = "all_failed"
ALL_DONE = "all_done"
ALL_DONE_SETUP_SUCCESS = "all_done_setup_success"
ONE_SUCCESS = "one_success"
ONE_FAILED = "one_failed"
ONE_DONE = "one_done"
NONE_FAILED = "none_failed"
NONE_FAILED_OR_SKIPPED = "none_failed_or_skipped"
NONE_SKIPPED = "none_skipped"
DUMMY = "dummy"
ALWAYS = "always"
NONE_FAILED_MIN_ONE_SUCCESS = "none_failed_min_one_success"
ALL_SKIPPED = "all_skipped"
@classmethod
def is_valid(cls, trigger_rule: str) -> bool:
"""Validates a trigger rule."""
return trigger_rule in cls.all_triggers()
@classmethod
def all_triggers(cls) -> set[str]:
"""Returns all trigger rules."""
return set(cls.__members__.values())
def __str__(self) -> str:
return self.value
| 1,767 | 32.358491 | 63 |
py
|
airflow
|
airflow-main/airflow/utils/jwt_signer.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any
import jwt
class JWTSigner:
"""
Signs and verifies JWT Token. Used to authorise and verify requests.
:param secret_key: key used to sign the request
:param expiration_time_in_seconds: time after which the token becomes invalid (in seconds)
:param audience: audience that the request is expected to have
:param leeway_in_seconds: leeway that allows for a small clock skew between the two parties
:param algorithm: algorithm used for signing
"""
def __init__(
self,
secret_key: str,
expiration_time_in_seconds: int,
audience: str,
leeway_in_seconds: int = 5,
algorithm: str = "HS512",
):
self._secret_key = secret_key
self._expiration_time_in_seconds = expiration_time_in_seconds
self._audience = audience
self._leeway_in_seconds = leeway_in_seconds
self._algorithm = algorithm
def generate_signed_token(self, extra_payload: dict[str, Any]) -> str:
"""
Generate JWT with extra payload added.
:param extra_payload: extra payload that is added to the signed token
:return: signed token
"""
jwt_dict = {
"aud": self._audience,
"iat": datetime.utcnow(),
"nbf": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(seconds=self._expiration_time_in_seconds),
}
jwt_dict.update(extra_payload)
token = jwt.encode(
jwt_dict,
self._secret_key,
algorithm=self._algorithm,
)
return token
def verify_token(self, token: str) -> dict[str, Any]:
payload = jwt.decode(
token,
self._secret_key,
leeway=timedelta(seconds=self._leeway_in_seconds),
algorithms=[self._algorithm],
options={
"verify_signature": True,
"require": ["exp", "iat", "nbf"],
},
audience=self._audience,
)
return payload
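# Illustrative usage (secret and claims are placeholders, not part of the original module):
#
#     signer = JWTSigner(secret_key="not-a-real-secret", expiration_time_in_seconds=30, audience="api")
#     token = signer.generate_signed_token({"method": "GET"})
#     payload = signer.verify_token(token)   # raises jwt exceptions if expired or invalid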
| 2,915 | 33.714286 | 95 |
py
|
airflow
|
airflow-main/airflow/utils/operator_helpers.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime
from typing import Any, Callable, Collection, Mapping, TypeVar
from airflow import settings
from airflow.utils.context import Context, lazy_mapping_from_context
R = TypeVar("R")
DEFAULT_FORMAT_PREFIX = "airflow.ctx."
ENV_VAR_FORMAT_PREFIX = "AIRFLOW_CTX_"
AIRFLOW_VAR_NAME_FORMAT_MAPPING = {
"AIRFLOW_CONTEXT_DAG_ID": {
"default": f"{DEFAULT_FORMAT_PREFIX}dag_id",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}DAG_ID",
},
"AIRFLOW_CONTEXT_TASK_ID": {
"default": f"{DEFAULT_FORMAT_PREFIX}task_id",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}TASK_ID",
},
"AIRFLOW_CONTEXT_EXECUTION_DATE": {
"default": f"{DEFAULT_FORMAT_PREFIX}execution_date",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}EXECUTION_DATE",
},
"AIRFLOW_CONTEXT_TRY_NUMBER": {
"default": f"{DEFAULT_FORMAT_PREFIX}try_number",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}TRY_NUMBER",
},
"AIRFLOW_CONTEXT_DAG_RUN_ID": {
"default": f"{DEFAULT_FORMAT_PREFIX}dag_run_id",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}DAG_RUN_ID",
},
"AIRFLOW_CONTEXT_DAG_OWNER": {
"default": f"{DEFAULT_FORMAT_PREFIX}dag_owner",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}DAG_OWNER",
},
"AIRFLOW_CONTEXT_DAG_EMAIL": {
"default": f"{DEFAULT_FORMAT_PREFIX}dag_email",
"env_var_format": f"{ENV_VAR_FORMAT_PREFIX}DAG_EMAIL",
},
}
def context_to_airflow_vars(context: Mapping[str, Any], in_env_var_format: bool = False) -> dict[str, str]:
"""
Return values used to externally reconstruct relations between dags, dag_runs, tasks and task_instances.
Given a context, this function provides a dictionary of values that can be used to
externally reconstruct relations between dags, dag_runs, tasks and task_instances.
Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if
in_env_var_format is set to True.
:param context: The context for the task_instance of interest.
:param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.
:return: task_instance context as dict.
"""
params = {}
if in_env_var_format:
name_format = "env_var_format"
else:
name_format = "default"
task = context.get("task")
task_instance = context.get("task_instance")
dag_run = context.get("dag_run")
ops = [
(task, "email", "AIRFLOW_CONTEXT_DAG_EMAIL"),
(task, "owner", "AIRFLOW_CONTEXT_DAG_OWNER"),
(task_instance, "dag_id", "AIRFLOW_CONTEXT_DAG_ID"),
(task_instance, "task_id", "AIRFLOW_CONTEXT_TASK_ID"),
(task_instance, "execution_date", "AIRFLOW_CONTEXT_EXECUTION_DATE"),
(task_instance, "try_number", "AIRFLOW_CONTEXT_TRY_NUMBER"),
(dag_run, "run_id", "AIRFLOW_CONTEXT_DAG_RUN_ID"),
]
context_params = settings.get_airflow_context_vars(context)
for key, value in context_params.items():
if not isinstance(key, str):
raise TypeError(f"key <{key}> must be string")
if not isinstance(value, str):
raise TypeError(f"value of key <{key}> must be string, not {type(value)}")
if in_env_var_format:
if not key.startswith(ENV_VAR_FORMAT_PREFIX):
key = ENV_VAR_FORMAT_PREFIX + key.upper()
else:
if not key.startswith(DEFAULT_FORMAT_PREFIX):
key = DEFAULT_FORMAT_PREFIX + key
params[key] = value
for subject, attr, mapping_key in ops:
_attr = getattr(subject, attr, None)
if subject and _attr:
mapping_value = AIRFLOW_VAR_NAME_FORMAT_MAPPING[mapping_key][name_format]
if isinstance(_attr, str):
params[mapping_value] = _attr
elif isinstance(_attr, datetime):
params[mapping_value] = _attr.isoformat()
elif isinstance(_attr, list):
# os env variable value needs to be string
params[mapping_value] = ",".join(_attr)
else:
params[mapping_value] = str(_attr)
return params
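# Illustrative output (values are placeholders, not part of the original module):
#
#     context_to_airflow_vars(context)
#     # {"airflow.ctx.dag_id": "my_dag", "airflow.ctx.task_id": "my_task", ...}
#     context_to_airflow_vars(context, in_env_var_format=True)
#     # {"AIRFLOW_CTX_DAG_ID": "my_dag", "AIRFLOW_CTX_TASK_ID": "my_task", ...}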
class KeywordParameters:
"""Wrapper representing ``**kwargs`` to a callable.
The actual ``kwargs`` can be obtained by calling either ``unpacking()`` or
``serializing()``. They behave almost the same and are only different if
the containing ``kwargs`` is an Airflow Context object, and the calling
function uses ``**kwargs`` in the argument list.
In this particular case, ``unpacking()`` uses ``lazy-object-proxy`` to
prevent the Context from emitting deprecation warnings too eagerly when it's
unpacked by ``**``. ``serializing()`` does not do this, and will allow the
warnings to be emitted eagerly, which is useful when you want to dump the
content and use it somewhere else without needing ``lazy-object-proxy``.
"""
def __init__(self, kwargs: Mapping[str, Any], *, wildcard: bool) -> None:
self._kwargs = kwargs
self._wildcard = wildcard
@classmethod
def determine(
cls,
func: Callable[..., Any],
args: Collection[Any],
kwargs: Mapping[str, Any],
) -> KeywordParameters:
import inspect
import itertools
signature = inspect.signature(func)
has_wildcard_kwargs = any(p.kind == p.VAR_KEYWORD for p in signature.parameters.values())
for name in itertools.islice(signature.parameters.keys(), len(args)):
# Check if args conflict with names in kwargs.
if name in kwargs:
raise ValueError(f"The key {name!r} in args is a part of kwargs and therefore reserved.")
if has_wildcard_kwargs:
# If the callable has a **kwargs argument, it's ready to accept all the kwargs.
return cls(kwargs, wildcard=True)
# If the callable has no **kwargs argument, it only wants the arguments it requested.
kwargs = {key: kwargs[key] for key in signature.parameters if key in kwargs}
return cls(kwargs, wildcard=False)
def unpacking(self) -> Mapping[str, Any]:
"""Dump the kwargs mapping to unpack with ``**`` in a function call."""
if self._wildcard and isinstance(self._kwargs, Context):
return lazy_mapping_from_context(self._kwargs)
return self._kwargs
def serializing(self) -> Mapping[str, Any]:
"""Dump the kwargs mapping for serialization purposes."""
return self._kwargs
def determine_kwargs(
func: Callable[..., Any],
args: Collection[Any],
kwargs: Mapping[str, Any],
) -> Mapping[str, Any]:
"""
Inspect the signature of a callable to determine which kwargs need to be passed to the callable.
:param func: The callable that you want to invoke
:param args: The positional arguments that need to be passed to the callable, so we know how many to skip.
:param kwargs: The keyword arguments that need to be filtered before passing to the callable.
:return: A dictionary which contains the keyword arguments that are compatible with the callable.
"""
return KeywordParameters.determine(func, args, kwargs).unpacking()
def make_kwargs_callable(func: Callable[..., R]) -> Callable[..., R]:
"""
Creates a new callable that only forwards necessary arguments from any provided input.
Make a new callable that can accept any number of positional or keyword arguments
but only forwards those required by the given callable func.
"""
import functools
@functools.wraps(func)
def kwargs_func(*args, **kwargs):
kwargs = determine_kwargs(func, args, kwargs)
return func(*args, **kwargs)
return kwargs_func
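# Illustrative usage (names are placeholders, not part of the original module):
#
#     def greet(name):
#         return f"hello {name}"
#
#     flexible_greet = make_kwargs_callable(greet)
#     flexible_greet(name="world", unused="ignored")   # extra kwargs are dropped -> "hello world"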
| 8,559 | 38.813953 | 110 |
py
|
airflow
|
airflow-main/airflow/utils/file.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ast
import io
import logging
import os
import zipfile
from collections import OrderedDict
from pathlib import Path
from typing import Generator, NamedTuple, Pattern, Protocol, overload
import re2
from pathspec.patterns import GitWildMatchPattern
from airflow.configuration import conf
from airflow.exceptions import RemovedInAirflow3Warning
log = logging.getLogger(__name__)
class _IgnoreRule(Protocol):
"""Interface for ignore rules for structural subtyping."""
@staticmethod
def compile(pattern: str, base_dir: Path, definition_file: Path) -> _IgnoreRule | None:
"""
Build an ignore rule from the supplied pattern.
``base_dir`` and ``definition_file`` should be absolute paths.
"""
@staticmethod
def match(path: Path, rules: list[_IgnoreRule]) -> bool:
"""Match a candidate absolute path against a list of rules."""
class _RegexpIgnoreRule(NamedTuple):
"""Typed namedtuple with utility functions for regexp ignore rules."""
pattern: Pattern
base_dir: Path
@staticmethod
def compile(pattern: str, base_dir: Path, definition_file: Path) -> _IgnoreRule | None:
"""Build an ignore rule from the supplied regexp pattern and log a useful warning if it is invalid."""
try:
return _RegexpIgnoreRule(re2.compile(pattern), base_dir)
except re2.error as e:
log.warning("Ignoring invalid regex '%s' from %s: %s", pattern, definition_file, e)
return None
@staticmethod
def match(path: Path, rules: list[_IgnoreRule]) -> bool:
"""Match a list of ignore rules against the supplied path."""
for rule in rules:
if not isinstance(rule, _RegexpIgnoreRule):
raise ValueError(f"_RegexpIgnoreRule cannot match rules of type: {type(rule)}")
if rule.pattern.search(str(path.relative_to(rule.base_dir))) is not None:
return True
return False
class _GlobIgnoreRule(NamedTuple):
"""Typed namedtuple with utility functions for glob ignore rules."""
pattern: Pattern
raw_pattern: str
include: bool | None = None
relative_to: Path | None = None
@staticmethod
def compile(pattern: str, _, definition_file: Path) -> _IgnoreRule | None:
"""Build an ignore rule from the supplied glob pattern and log a useful warning if it is invalid."""
relative_to: Path | None = None
if pattern.strip() == "/":
# "/" doesn't match anything in gitignore
log.warning("Ignoring no-op glob pattern '/' from %s", definition_file)
return None
if pattern.startswith("/") or "/" in pattern.rstrip("/"):
# See https://git-scm.com/docs/gitignore
# > If there is a separator at the beginning or middle (or both) of the pattern, then the
# > pattern is relative to the directory level of the particular .gitignore file itself.
# > Otherwise the pattern may also match at any level below the .gitignore level.
relative_to = definition_file.parent
ignore_pattern = GitWildMatchPattern(pattern)
return _GlobIgnoreRule(ignore_pattern.regex, pattern, ignore_pattern.include, relative_to)
@staticmethod
def match(path: Path, rules: list[_IgnoreRule]) -> bool:
"""Match a list of ignore rules against the supplied path."""
matched = False
for r in rules:
if not isinstance(r, _GlobIgnoreRule):
raise ValueError(f"_GlobIgnoreRule cannot match rules of type: {type(r)}")
rule: _GlobIgnoreRule = r # explicit typing to make mypy play nicely
rel_path = str(path.relative_to(rule.relative_to) if rule.relative_to else path.name)
if rule.raw_pattern.endswith("/") and path.is_dir():
# ensure the test path will potentially match a directory pattern if it is a directory
rel_path += "/"
if rule.include is not None and rule.pattern.match(rel_path) is not None:
matched = rule.include
return matched
def TemporaryDirectory(*args, **kwargs):
"""This function is deprecated. Please use `tempfile.TemporaryDirectory`."""
import warnings
from tempfile import TemporaryDirectory as TmpDir
warnings.warn(
"This function is deprecated. Please use `tempfile.TemporaryDirectory`",
RemovedInAirflow3Warning,
stacklevel=2,
)
return TmpDir(*args, **kwargs)
def mkdirs(path, mode):
"""
Creates the directory specified by path, creating intermediate directories as necessary.
If directory already exists, this is a no-op.
:param path: The directory to create
:param mode: The mode to give to the directory e.g. 0o755, ignores umask
"""
import warnings
warnings.warn(
f"This function is deprecated. Please use `pathlib.Path({path}).mkdir`",
RemovedInAirflow3Warning,
stacklevel=2,
)
Path(path).mkdir(mode=mode, parents=True, exist_ok=True)
ZIP_REGEX = re2.compile(rf"((.*\.zip){re2.escape(os.sep)})?(.*)")
@overload
def correct_maybe_zipped(fileloc: None) -> None:
...
@overload
def correct_maybe_zipped(fileloc: str | Path) -> str | Path:
...
def correct_maybe_zipped(fileloc: None | str | Path) -> None | str | Path:
"""If the path contains a folder with a .zip suffix, treat it as a zip archive and return path."""
if not fileloc:
return fileloc
search_ = ZIP_REGEX.search(str(fileloc))
if not search_:
return fileloc
_, archive, _ = search_.groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
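# Hedged usage sketch: `correct_maybe_zipped` collapses a path pointing *inside* a
# zip archive down to the archive itself. The paths below are hypothetical.
def _example_correct_maybe_zipped():
    # "/dags/my_dags.zip/dag_inside.py" -> "/dags/my_dags.zip" when the archive
    # exists on disk; a plain path such as "/dags/my_dag.py" is returned unchanged.
    return correct_maybe_zipped("/dags/my_dags.zip/dag_inside.py")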
def open_maybe_zipped(fileloc, mode="r"):
"""
Opens the given file.
If the path contains a folder with a .zip suffix, then the folder
is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
"""
_, archive, filename = ZIP_REGEX.search(fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return io.TextIOWrapper(zipfile.ZipFile(archive, mode=mode).open(filename))
else:
return open(fileloc, mode=mode)
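# Hedged usage sketch: reading a DAG file that may live inside a packaged .zip.
# The path is hypothetical; for a regular .py file this is just `open()`.
def _example_open_maybe_zipped():
    with open_maybe_zipped("/opt/airflow/dags/my_dags.zip/dag_inside.py") as f:
        return f.read()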
def _find_path_from_directory(
base_dir_path: str | os.PathLike[str],
ignore_file_name: str,
ignore_rule_type: type[_IgnoreRule],
) -> Generator[str, None, None]:
"""Recursively search the base path and return the list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the file name containing regular expressions for files that should be ignored.
:param ignore_rule_type: the concrete class for ignore rules, which implements the _IgnoreRule interface.
:return: a generator of file paths which should not be ignored.
"""
# A Dict of patterns, keyed using resolved, absolute paths
patterns_by_dir: dict[Path, list[_IgnoreRule]] = {}
for root, dirs, files in os.walk(base_dir_path, followlinks=True):
patterns: list[_IgnoreRule] = patterns_by_dir.get(Path(root).resolve(), [])
ignore_file_path = Path(root) / ignore_file_name
if ignore_file_path.is_file():
with open(ignore_file_path) as ifile:
lines_no_comments = [re2.sub(r"\s*#.*", "", line) for line in ifile.read().split("\n")]
# append new patterns and filter out "None" objects, which are invalid patterns
patterns += [
p
for p in [
ignore_rule_type.compile(line, Path(base_dir_path), ignore_file_path)
for line in lines_no_comments
if line
]
if p is not None
]
# evaluation order of patterns is important with negation
# so that later patterns can override earlier patterns
patterns = list(OrderedDict.fromkeys(patterns).keys())
dirs[:] = [subdir for subdir in dirs if not ignore_rule_type.match(Path(root) / subdir, patterns)]
# explicit loop for infinite recursion detection since we are following symlinks in this walk
for sd in dirs:
dirpath = (Path(root) / sd).resolve()
if dirpath in patterns_by_dir:
raise RuntimeError(
"Detected recursive loop when walking DAG directory "
f"{base_dir_path}: {dirpath} has appeared more than once."
)
patterns_by_dir.update({dirpath: patterns.copy()})
for file in files:
if file == ignore_file_name:
continue
abs_file_path = Path(root) / file
if ignore_rule_type.match(abs_file_path, patterns):
continue
yield str(abs_file_path)
def find_path_from_directory(
base_dir_path: str | os.PathLike[str],
ignore_file_name: str,
ignore_file_syntax: str = conf.get_mandatory_value("core", "DAG_IGNORE_FILE_SYNTAX", fallback="regexp"),
) -> Generator[str, None, None]:
"""Recursively search the base path for a list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the name of the file that specifies the patterns of files/dirs to be ignored
:param ignore_file_syntax: the syntax of patterns in the ignore file: regexp or glob
:return: a generator of file paths.
"""
if ignore_file_syntax == "glob":
return _find_path_from_directory(base_dir_path, ignore_file_name, _GlobIgnoreRule)
elif ignore_file_syntax == "regexp" or not ignore_file_syntax:
return _find_path_from_directory(base_dir_path, ignore_file_name, _RegexpIgnoreRule)
else:
raise ValueError(f"Unsupported ignore_file_syntax: {ignore_file_syntax}")
def list_py_file_paths(
directory: str | os.PathLike[str] | None,
safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE", fallback=True),
include_examples: bool | None = None,
) -> list[str]:
"""Traverse a directory and look for Python files.
:param directory: the directory to traverse
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:param include_examples: include example DAGs
:return: a list of paths to Python files in the specified directory
"""
if include_examples is None:
include_examples = conf.getboolean("core", "LOAD_EXAMPLES")
file_paths: list[str] = []
if directory is None:
file_paths = []
elif os.path.isfile(directory):
file_paths = [str(directory)]
elif os.path.isdir(directory):
file_paths.extend(find_dag_file_paths(directory, safe_mode))
if include_examples:
from airflow import example_dags
example_dag_folder = next(iter(example_dags.__path__))
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, include_examples=False))
return file_paths
def find_dag_file_paths(directory: str | os.PathLike[str], safe_mode: bool) -> list[str]:
"""Finds file paths of all DAG files."""
file_paths = []
for file_path in find_path_from_directory(directory, ".airflowignore"):
try:
if not os.path.isfile(file_path):
continue
_, file_ext = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != ".py" and not zipfile.is_zipfile(file_path):
continue
if not might_contain_dag(file_path, safe_mode):
continue
file_paths.append(file_path)
except Exception:
log.exception("Error while examining %s", file_path)
return file_paths
COMMENT_PATTERN = re2.compile(r"\s*#.*")
def might_contain_dag(file_path: str, safe_mode: bool, zip_file: zipfile.ZipFile | None = None) -> bool:
"""
Check whether a Python file contains Airflow DAGs.
When safe_mode is off (False), this function always returns True.
If might_contain_dag_callable is not configured, the default Airflow heuristic is used.
"""
if not safe_mode:
return True
might_contain_dag_callable = conf.getimport(
"core",
"might_contain_dag_callable",
fallback="airflow.utils.file.might_contain_dag_via_default_heuristic",
)
return might_contain_dag_callable(file_path=file_path, zip_file=zip_file)
def might_contain_dag_via_default_heuristic(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
"""
Heuristic that guesses whether a Python file contains an Airflow DAG definition.
:param file_path: Path to the file to be checked.
:param zip_file: if passed, checks the archive. Otherwise, check local filesystem.
:return: True, if file might contain DAGs.
"""
if zip_file:
with zip_file.open(file_path) as current_file:
content = current_file.read()
else:
if zipfile.is_zipfile(file_path):
return True
with open(file_path, "rb") as dag_file:
content = dag_file.read()
content = content.lower()
return all(s in content for s in (b"dag", b"airflow"))
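# Hedged sketch of the default heuristic: a file is only treated as a DAG candidate
# if both b"dag" and b"airflow" appear (case-insensitively) in its bytes. The path
# below is hypothetical.
def _example_might_contain_dag(file_path="/opt/airflow/dags/my_dag.py"):
    # With safe_mode=False every file is considered a candidate without being read.
    assert might_contain_dag(file_path, safe_mode=False) is True
    return might_contain_dag_via_default_heuristic(file_path)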
def _find_imported_modules(module: ast.Module) -> Generator[str, None, None]:
for st in module.body:
if isinstance(st, ast.Import):
for n in st.names:
yield n.name
elif isinstance(st, ast.ImportFrom) and st.module is not None:
yield st.module
def iter_airflow_imports(file_path: str) -> Generator[str, None, None]:
"""Find Airflow modules imported in the given file."""
try:
parsed = ast.parse(Path(file_path).read_bytes())
except Exception:
return
for m in _find_imported_modules(parsed):
if m.startswith("airflow."):
yield m
| 14,926 | 37.274359 | 110 |
py
|
airflow
|
airflow-main/airflow/utils/timeout.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import signal
from threading import Timer
from typing import ContextManager
from airflow.exceptions import AirflowTaskTimeout
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.platform import IS_WINDOWS
_timeout = ContextManager[None]
class TimeoutWindows(_timeout, LoggingMixin):
"""Windows timeout version: To be used in a ``with`` block and timeout its content."""
def __init__(self, seconds=1, error_message="Timeout"):
super().__init__()
self._timer: Timer | None = None
self.seconds = seconds
self.error_message = error_message + ", PID: " + str(os.getpid())
def handle_timeout(self, *args):
"""Logs information and raises AirflowTaskTimeout."""
self.log.error("Process timed out, PID: %s", str(os.getpid()))
raise AirflowTaskTimeout(self.error_message)
def __enter__(self):
if self._timer:
self._timer.cancel()
self._timer = Timer(self.seconds, self.handle_timeout)
self._timer.start()
def __exit__(self, type_, value, traceback):
if self._timer:
self._timer.cancel()
self._timer = None
class TimeoutPosix(_timeout, LoggingMixin):
"""POSIX Timeout version: To be used in a ``with`` block and timeout its content."""
def __init__(self, seconds=1, error_message="Timeout"):
super().__init__()
self.seconds = seconds
self.error_message = error_message + ", PID: " + str(os.getpid())
def handle_timeout(self, signum, frame):
"""Logs information and raises AirflowTaskTimeout."""
self.log.error("Process timed out, PID: %s", str(os.getpid()))
raise AirflowTaskTimeout(self.error_message)
def __enter__(self):
try:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.setitimer(signal.ITIMER_REAL, self.seconds)
except ValueError:
self.log.warning("timeout can't be used in the current context", exc_info=True)
def __exit__(self, type_, value, traceback):
try:
signal.setitimer(signal.ITIMER_REAL, 0)
except ValueError:
self.log.warning("timeout can't be used in the current context", exc_info=True)
if IS_WINDOWS:
timeout: type[TimeoutWindows | TimeoutPosix] = TimeoutWindows
else:
timeout = TimeoutPosix
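# Hedged usage sketch: guarding a potentially slow call with the platform-appropriate
# timeout context manager selected above. The sleep is only illustrative; note the
# POSIX variant relies on SIGALRM and therefore only works in the main thread.
def _example_timeout():
    import time

    try:
        with timeout(seconds=2, error_message="example timed out"):
            time.sleep(1)  # finishes before the alarm fires
    except AirflowTaskTimeout:
        return False
    return True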
| 3,199 | 34.955056 | 91 |
py
|
airflow
|
airflow-main/airflow/utils/configuration.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import os
from tempfile import mkstemp
from airflow.configuration import conf
from airflow.utils.platform import IS_WINDOWS
def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):
"""
Returns a path for a temporary file including a full copy of the configuration settings.
:param include_env: Should the value of configuration from ``AIRFLOW__``
environment variables be included or not
:param include_cmds: Should the result of calling any *_cmd config be
set (True, default), or should the _cmd options be left as the
command to run (False)
:return: a path to a temporary file
"""
cfg_dict = conf.as_dict(
display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env
)
temp_fd, cfg_path = mkstemp()
with os.fdopen(temp_fd, "w") as temp_file:
# Set the permissions before we write anything to it.
if chmod is not None and not IS_WINDOWS:
os.fchmod(temp_fd, chmod)
json.dump(cfg_dict, temp_file)
return cfg_path
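# Hedged usage sketch: the returned path points at a JSON dump of the effective
# configuration; the caller is responsible for deleting the temporary file.
def _example_tmp_configuration_copy():
    cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)
    try:
        with open(cfg_path) as cfg_file:
            return json.load(cfg_file)
    finally:
        os.remove(cfg_path)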
| 1,911 | 36.490196 | 92 |
py
|
airflow
|
airflow-main/airflow/utils/types.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import enum
from typing import TYPE_CHECKING
from airflow.typing_compat import TypedDict
if TYPE_CHECKING:
from datetime import datetime
class ArgNotSet:
"""Sentinel type for annotations, useful when None is not viable.
Use like this::
def is_arg_passed(arg: Union[ArgNotSet, None] = NOTSET) -> bool:
if arg is NOTSET:
return False
return True
is_arg_passed() # False.
is_arg_passed(None) # True.
"""
NOTSET = ArgNotSet()
"""Sentinel value for argument default. See ``ArgNotSet``."""
class DagRunType(str, enum.Enum):
"""Class with DagRun types."""
BACKFILL_JOB = "backfill"
SCHEDULED = "scheduled"
MANUAL = "manual"
DATASET_TRIGGERED = "dataset_triggered"
def __str__(self) -> str:
return self.value
def generate_run_id(self, logical_date: datetime) -> str:
return f"{self}__{logical_date.isoformat()}"
@staticmethod
def from_run_id(run_id: str) -> DagRunType:
"""Resolved DagRun type from run_id."""
for run_type in DagRunType:
if run_id and run_id.startswith(f"{run_type.value}__"):
return run_type
return DagRunType.MANUAL
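# Hedged usage sketch: round-tripping a run_id through DagRunType. The timestamp is
# hypothetical.
def _example_dag_run_type():
    from datetime import datetime, timezone

    run_id = DagRunType.SCHEDULED.generate_run_id(datetime(2023, 1, 1, tzinfo=timezone.utc))
    # run_id == "scheduled__2023-01-01T00:00:00+00:00"
    return DagRunType.from_run_id(run_id)  # DagRunType.SCHEDULED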
class EdgeInfoType(TypedDict):
"""Extra metadata that the DAG can store about an edge, usually generated from an EdgeModifier."""
label: str | None
| 2,208 | 28.851351 | 102 |
py
|
airflow
|
airflow-main/airflow/utils/cli.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities module for cli."""
from __future__ import annotations
import functools
import logging
import os
import socket
import sys
import threading
import traceback
import warnings
from argparse import Namespace
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Callable, TypeVar, cast
import re2
from sqlalchemy.orm import Session
from airflow import settings
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.utils import cli_action_loggers
from airflow.utils.log.non_caching_file_handler import NonCachingFileHandler
from airflow.utils.platform import getuser, is_terminal_support_colors
from airflow.utils.session import NEW_SESSION, provide_session
T = TypeVar("T", bound=Callable)
if TYPE_CHECKING:
from airflow.models.dag import DAG
logger = logging.getLogger(__name__)
def _check_cli_args(args):
if not args:
raise ValueError("Args should be set")
if not isinstance(args[0], Namespace):
raise ValueError(
f"1st positional argument should be argparse.Namespace instance, but is {type(args[0])}"
)
def action_cli(func=None, check_db=True):
def action_logging(f: T) -> T:
"""
Decorate a CLI function so that action-logging callbacks are submitted around its execution.
It will call action logger callbacks twice, one for
pre-execution and the other one for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.log.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
A wrapper for cli functions; assumes Namespace instance as first positional argument.
:param args: Positional arguments; a Namespace instance is expected as
the first positional argument
:param kwargs: Passthrough keyword arguments
"""
_check_cli_args(args)
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
verbose = getattr(args[0], "verbose", False)
root_logger = logging.getLogger()
if verbose:
root_logger.setLevel(logging.DEBUG)
for handler in root_logger.handlers:
handler.setLevel(logging.DEBUG)
try:
# Check and run migrations if necessary
if check_db:
from airflow.configuration import conf
from airflow.utils.db import check_and_run_migrations, synchronize_log_template
if conf.getboolean("database", "check_migrations"):
check_and_run_migrations()
synchronize_log_template()
return f(*args, **kwargs)
except Exception as e:
metrics["error"] = e
raise
finally:
metrics["end_datetime"] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return cast(T, wrapper)
if func:
return action_logging(func)
return action_logging
def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args.
It assumes that the function is called from an airflow.bin.cli module function
and receives a Namespace instance that optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
sub_commands_to_check = {"users", "connections"}
sensitive_fields = {"-p", "--password", "--conn-password"}
full_command = list(sys.argv)
sub_command = full_command[1] if len(full_command) > 1 else None
if sub_command in sub_commands_to_check:
for idx, command in enumerate(full_command):
if command in sensitive_fields:
# For cases when password is passed as "--password xyz" (with space between key and value)
full_command[idx + 1] = "*" * 8
else:
# For cases when password is passed as "--password=xyz" (with '=' between key and value)
for sensitive_field in sensitive_fields:
if command.startswith(f"{sensitive_field}="):
full_command[idx] = f'{sensitive_field}={"*" * 8}'
metrics = {
"sub_command": func_name,
"start_datetime": datetime.utcnow(),
"full_command": f"{full_command}",
"user": getuser(),
}
if not isinstance(namespace, Namespace):
raise ValueError(
f"namespace argument should be argparse.Namespace instance, but is {type(namespace)}"
)
tmp_dic = vars(namespace)
metrics["dag_id"] = tmp_dic.get("dag_id")
metrics["task_id"] = tmp_dic.get("task_id")
metrics["execution_date"] = tmp_dic.get("execution_date")
metrics["host_name"] = socket.gethostname()
return metrics
def process_subdir(subdir: str | None):
"""Expands path to absolute by replacing 'DAGS_FOLDER', '~', '.', etc."""
if subdir:
if not settings.DAGS_FOLDER:
raise ValueError("DAGS_FOLDER variable in settings should be filled.")
subdir = subdir.replace("DAGS_FOLDER", settings.DAGS_FOLDER)
subdir = os.path.abspath(os.path.expanduser(subdir))
return subdir
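# Hedged usage sketch: a --subdir value containing the DAGS_FOLDER placeholder is
# expanded against settings.DAGS_FOLDER before being resolved to an absolute path.
def _example_process_subdir():
    return process_subdir("DAGS_FOLDER/team_a")  # e.g. "/opt/airflow/dags/team_a"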
def get_dag_by_file_location(dag_id: str):
"""Returns DAG of a given dag_id by looking up file location."""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
def _search_for_dag_file(val: str | None) -> str | None:
"""
Search for the file referenced at fileloc.
By the time we get to this function, we've already run this `val` through `process_subdir`
and loaded the DagBag there and came up empty. So here, if `val` is a file path, we make
a last ditch effort to try and find a dag file with the same name in our dags folder. (This
avoids the unnecessary dag parsing that would occur if we just parsed the dags folder).
If `val` is a path to a file, this likely means that the serializing process had a dags_folder
equal to only the dag file in question. This prevents us from determining the relative location.
And if the paths are different between worker and dag processor / scheduler, then we won't find
the dag at the given location.
"""
if val and Path(val).suffix in (".zip", ".py"):
matches = list(Path(settings.DAGS_FOLDER).rglob(Path(val).name))
if len(matches) == 1:
return matches[0].as_posix()
return None
def get_dag(subdir: str | None, dag_id: str, from_db: bool = False) -> DAG:
"""
Returns DAG of a given dag_id.
First we'll try to use the given subdir. If that doesn't work, we'll try to
find the correct path (assuming it's a file) and failing that, use the configured
dags folder.
"""
from airflow.models import DagBag
if from_db:
dagbag = DagBag(read_dags_from_db=True)
else:
first_path = process_subdir(subdir)
dagbag = DagBag(first_path)
dag = dagbag.get_dag(dag_id)
if not dag:
if from_db:
raise AirflowException(f"Dag {dag_id!r} could not be found in DagBag read from database.")
fallback_path = _search_for_dag_file(subdir) or settings.DAGS_FOLDER
logger.warning("Dag %r not found in path %s; trying path %s", dag_id, first_path, fallback_path)
dagbag = DagBag(dag_folder=fallback_path)
dag = dagbag.get_dag(dag_id)
if not dag:
raise AirflowException(
f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
return dag
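# Hedged usage sketch: resolving a DAG for a CLI command. If the supplied subdir does
# not contain it, get_dag falls back to the configured DAGs folder. The dag_id is
# hypothetical.
def _example_get_dag():
    return get_dag(subdir="DAGS_FOLDER", dag_id="example_bash_operator")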
def get_dags(subdir: str | None, dag_id: str, use_regex: bool = False):
"""Returns DAG(s) matching a given regex or dag_id."""
from airflow.models import DagBag
if not use_regex:
return [get_dag(subdir, dag_id)]
dagbag = DagBag(process_subdir(subdir))
matched_dags = [dag for dag in dagbag.dags.values() if re2.search(dag_id, dag.dag_id)]
if not matched_dags:
raise AirflowException(
f"dag_id could not be found with regex: {dag_id}. Either the dag did not exist or "
f"it failed to parse."
)
return matched_dags
@provide_session
def get_dag_by_pickle(pickle_id: int, session: Session = NEW_SESSION) -> DAG:
"""Fetch DAG from the database using pickling."""
from airflow.models import DagPickle
dag_pickle = session.query(DagPickle).filter(DagPickle.id == pickle_id).first()
if not dag_pickle:
raise AirflowException(f"pickle_id could not be found in DagPickle.id list: {pickle_id}")
pickle_dag = dag_pickle.pickle
return pickle_dag
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
"""Creates logging paths."""
if not stderr:
stderr = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.err")
if not stdout:
stdout = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.out")
if not log:
log = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.log")
if not pid:
pid = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.pid")
else:
pid = os.path.abspath(pid)
return pid, stdout, stderr, log
def setup_logging(filename):
"""Creates log file handler for daemon process."""
root = logging.getLogger()
handler = NonCachingFileHandler(filename)
formatter = logging.Formatter(settings.SIMPLE_LOG_FORMAT)
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(settings.LOGGING_LEVEL)
return handler.stream
def sigint_handler(sig, frame):
"""
Returns without error on SIGINT or SIGTERM signals in interactive command mode.
e.g. CTRL+C or kill <PID>
"""
sys.exit(0)
def sigquit_handler(sig, frame):
"""
Helps debug deadlocks by printing stacktraces when this gets a SIGQUIT.
e.g. kill -s QUIT <PID> or CTRL+\\
"""
print(f"Dumping stack traces for all threads in PID {os.getpid()}")
id_to_name = {th.ident: th.name for th in threading.enumerate()}
code = []
for thread_id, stack in sys._current_frames().items():
code.append(f"\n# Thread: {id_to_name.get(thread_id, '')}({thread_id})")
for filename, line_number, name, line in traceback.extract_stack(stack):
code.append(f'File: "{filename}", line {line_number}, in {name}')
if line:
code.append(f" {line.strip()}")
print("\n".join(code))
class ColorMode:
"""Coloring modes. If `auto` is then automatically detected."""
ON = "on"
OFF = "off"
AUTO = "auto"
def should_use_colors(args) -> bool:
"""Processes arguments and decides whether to enable color in output."""
if args.color == ColorMode.ON:
return True
if args.color == ColorMode.OFF:
return False
return is_terminal_support_colors()
def should_ignore_depends_on_past(args) -> bool:
if args.ignore_depends_on_past:
warnings.warn(
"Using `--ignore-depends-on-past` is Deprecated."
"Please use `--depends-on-past ignore` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return True
return args.depends_on_past == "ignore"
def suppress_logs_and_warning(f: T) -> T:
"""Decorator to suppress logging and warning messages in cli functions."""
@functools.wraps(f)
def _wrapper(*args, **kwargs):
_check_cli_args(args)
if args[0].verbose:
f(*args, **kwargs)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.disable(logging.CRITICAL)
try:
f(*args, **kwargs)
finally:
# logging output again depends on the effective
# levels of individual loggers
logging.disable(logging.NOTSET)
return cast(T, _wrapper)
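# Hedged usage sketch: a hypothetical CLI entry point combining the decorators above.
# `args` is the argparse.Namespace produced by the Airflow CLI parser and is assumed
# to carry `verbose` and `subdir` attributes.
@suppress_logs_and_warning
@action_cli(check_db=False)
def _example_cli_command(args):
    print(process_subdir(args.subdir))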
| 13,883 | 35.34555 | 109 |
py
|
airflow
|
airflow-main/airflow/utils/platform.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Platform and system specific function."""
from __future__ import annotations
import getpass
import logging
import os
import pkgutil
import platform
import sys
from airflow.compat.functools import cache
IS_WINDOWS = platform.system() == "Windows"
log = logging.getLogger(__name__)
def is_tty():
"""Check if stdout is connected (is associated with a terminal device) to a tty(-like) device."""
if not hasattr(sys.stdout, "isatty"):
return False
return sys.stdout.isatty()
def is_terminal_support_colors() -> bool:
"""Try to determine if the current terminal supports colors."""
if sys.platform == "win32":
return False
if not is_tty():
return False
if "COLORTERM" in os.environ:
return True
term = os.environ.get("TERM", "dumb").lower()
if term in ("xterm", "linux") or "color" in term:
return True
return False
def get_airflow_git_version():
"""Returns the git commit hash representing the current version of the application."""
git_version = None
try:
git_version = str(pkgutil.get_data("airflow", "git_version"), encoding="UTF-8")
except Exception as e:
log.debug(e)
return git_version
@cache
def getuser() -> str:
"""
Get the username of the current user, or error with a nice error message if there's no current user.
We don't want to fall back to os.getuid() because not having a username
probably means the rest of the user environment is wrong (e.g. no $HOME).
Explicit failure is better than silently trying to work badly.
"""
try:
return getpass.getuser()
except KeyError:
# Inner import to avoid circular import
from airflow.exceptions import AirflowConfigException
raise AirflowConfigException(
"The user that Airflow is running as has no username; you must run"
"Airflow as a full user, with a username and home directory, "
"in order for it to function properly."
)
| 2,805 | 31.627907 | 104 |
py
|
airflow
|
airflow-main/airflow/utils/xcom.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# MAX XCOM Size is 48KB
# https://github.com/apache/airflow/pull/1618#discussion_r68249677
from __future__ import annotations
MAX_XCOM_SIZE = 49344
XCOM_RETURN_KEY = "return_value"
| 970 | 37.84 | 66 |
py
|
airflow
|
airflow-main/airflow/utils/sqlalchemy.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import copy
import datetime
import json
import logging
from typing import TYPE_CHECKING, Any, Generator, Iterable, overload
import pendulum
from dateutil import relativedelta
from sqlalchemy import TIMESTAMP, PickleType, and_, event, false, nullsfirst, or_, true, tuple_
from sqlalchemy.dialects import mssql, mysql
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import ColumnElement, Select
from sqlalchemy.sql.expression import ColumnOperators
from sqlalchemy.types import JSON, Text, TypeDecorator, TypeEngine, UnicodeText
from airflow import settings
from airflow.configuration import conf
from airflow.serialization.enums import Encoding
if TYPE_CHECKING:
from kubernetes.client.models.v1_pod import V1Pod
from sqlalchemy.orm import Query, Session
log = logging.getLogger(__name__)
utc = pendulum.tz.timezone("UTC")
using_mysql = conf.get_mandatory_value("database", "sql_alchemy_conn").lower().startswith("mysql")
class UtcDateTime(TypeDecorator):
"""
Similar to :class:`~sqlalchemy.types.TIMESTAMP` with ``timezone=True`` option, with some differences.
- Never silently accepts a naive :class:`~datetime.datetime`; instead it
raises :exc:`ValueError` unless the value is time zone aware.
- :class:`~datetime.datetime` value's :attr:`~datetime.datetime.tzinfo`
is always converted to UTC.
- Unlike SQLAlchemy's built-in :class:`~sqlalchemy.types.TIMESTAMP`,
it never returns a naive :class:`~datetime.datetime`, but a time zone
aware value, even with SQLite or MySQL.
- Always returns TIMESTAMP in UTC.
"""
impl = TIMESTAMP(timezone=True)
cache_ok = True
def process_bind_param(self, value, dialect):
if value is not None:
if not isinstance(value, datetime.datetime):
raise TypeError("expected datetime.datetime, not " + repr(value))
elif value.tzinfo is None:
raise ValueError("naive datetime is disallowed")
# For mysql we should store timestamps as naive values
# Timestamp in MYSQL is not timezone aware. In MySQL 5.6
# timezone added at the end is ignored but in MySQL 5.7
# inserting timezone value fails with 'invalid-date'
# See https://issues.apache.org/jira/browse/AIRFLOW-7001
if using_mysql:
from airflow.utils.timezone import make_naive
return make_naive(value, timezone=utc)
return value.astimezone(utc)
return None
def process_result_value(self, value, dialect):
"""
Process datetime values from the DB, making sure the result is always timezone-aware in UTC.
Not using timezone.convert_to_utc as that converts to configured TIMEZONE
while the DB might be running with some other setting. We assume UTC
datetimes in the database.
"""
if value is not None:
if value.tzinfo is None:
value = value.replace(tzinfo=utc)
else:
value = value.astimezone(utc)
return value
def load_dialect_impl(self, dialect):
if dialect.name == "mssql":
return mssql.DATETIME2(precision=6)
elif dialect.name == "mysql":
return mysql.TIMESTAMP(fsp=6)
return super().load_dialect_impl(dialect)
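# Hedged usage sketch: declaring a timezone-aware column on a hypothetical model.
# Values are normalized to UTC on the way in and naive datetimes are rejected.
def _example_utc_datetime_column():
    from sqlalchemy import Column, Integer
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class ExampleRun(Base):
        __tablename__ = "example_run"
        id = Column(Integer, primary_key=True)
        started_at = Column(UtcDateTime, nullable=False)

    return ExampleRun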
class ExtendedJSON(TypeDecorator):
"""
A version of the JSON column that uses the Airflow extended JSON serialization.
See airflow.serialization.
"""
impl = Text
cache_ok = True
def db_supports_json(self):
"""Checks if the database supports JSON (i.e. is NOT MSSQL)."""
return not conf.get("database", "sql_alchemy_conn").startswith("mssql")
def load_dialect_impl(self, dialect) -> TypeEngine:
if self.db_supports_json():
return dialect.type_descriptor(JSON)
return dialect.type_descriptor(UnicodeText)
def process_bind_param(self, value, dialect):
from airflow.serialization.serialized_objects import BaseSerialization
if value is None:
return None
# First, encode it into our custom JSON-targeted dict format
value = BaseSerialization.serialize(value)
# Then, if the database does not have native JSON support, encode it again as a string
if not self.db_supports_json():
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
from airflow.serialization.serialized_objects import BaseSerialization
if value is None:
return None
# Deserialize from a string first if needed
if not self.db_supports_json():
value = json.loads(value)
return BaseSerialization.deserialize(value)
def sanitize_for_serialization(obj: V1Pod):
"""
Convert pod to dict.... but *safely*.
When pod objects created with one k8s version are unpickled in a python
env with a more recent k8s version (in which the object attrs may have
changed) the unpickled obj may throw an error because the attr
expected on new obj may not be there on the unpickled obj.
This function still converts the pod to a dict; the only difference is
it populates missing attrs with None. You may compare with
https://github.com/kubernetes-client/python/blob/5a96bbcbe21a552cc1f9cda13e0522fafb0dbac8/kubernetes/client/api_client.py#L202
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
:meta private:
"""
if obj is None:
return None
elif isinstance(obj, (float, bool, bytes, str, int)):
return obj
elif isinstance(obj, list):
return [sanitize_for_serialization(sub_obj) for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(sanitize_for_serialization(sub_obj) for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
obj_dict = {
obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in obj.openapi_types.items()
# below is the only line we change, and we just add default=None for getattr
if getattr(obj, attr, None) is not None
}
return {key: sanitize_for_serialization(val) for key, val in obj_dict.items()}
def ensure_pod_is_valid_after_unpickling(pod: V1Pod) -> V1Pod | None:
"""
Convert pod to json and back so that pod is safe.
The pod_override in executor_config is a V1Pod object.
Such objects created with one k8s version, when unpickled in
an env with upgraded k8s version, may blow up when
`to_dict` is called, because openapi client code gen calls
getattr on all attrs in openapi_types for each object, and when
new attrs are added to that list, getattr will fail.
Here we re-serialize it to ensure it is not going to blow up.
:meta private:
"""
try:
# if to_dict works, the pod is fine
pod.to_dict()
return pod
except AttributeError:
pass
try:
from kubernetes.client.models.v1_pod import V1Pod
except ImportError:
return None
if not isinstance(pod, V1Pod):
return None
try:
from airflow.kubernetes.pod_generator import PodGenerator
# now we actually reserialize / deserialize the pod
pod_dict = sanitize_for_serialization(pod)
return PodGenerator.deserialize_model_dict(pod_dict)
except Exception:
return None
class ExecutorConfigType(PickleType):
"""
Adds special handling for K8s executor config.
If we unpickle a k8s object that was pickled under an earlier k8s library version, then
the unpickled object may throw an error when to_dict is called. To be more tolerant of
version changes we convert to JSON using Airflow's serializer before pickling.
"""
cache_ok = True
def bind_processor(self, dialect):
from airflow.serialization.serialized_objects import BaseSerialization
super_process = super().bind_processor(dialect)
def process(value):
val_copy = copy.copy(value)
if isinstance(val_copy, dict) and "pod_override" in val_copy:
val_copy["pod_override"] = BaseSerialization.serialize(val_copy["pod_override"])
return super_process(val_copy)
return process
def result_processor(self, dialect, coltype):
from airflow.serialization.serialized_objects import BaseSerialization
super_process = super().result_processor(dialect, coltype)
def process(value):
value = super_process(value) # unpickle
if isinstance(value, dict) and "pod_override" in value:
pod_override = value["pod_override"]
if isinstance(pod_override, dict) and pod_override.get(Encoding.TYPE):
# If pod_override was serialized with Airflow's BaseSerialization, deserialize it
value["pod_override"] = BaseSerialization.deserialize(pod_override)
else:
# backcompat path
# we no longer pickle raw pods but this code may be reached
# when accessing executor configs created in a prior version
new_pod = ensure_pod_is_valid_after_unpickling(pod_override)
if new_pod:
value["pod_override"] = new_pod
return value
return process
def compare_values(self, x, y):
"""
The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects.
If the installed library version has changed since the object was originally pickled,
due to the underlying ``__eq__`` method on these objects (which converts them to JSON),
we may encounter attribute errors. In this case we should replace the stored object.
From https://github.com/apache/airflow/pull/24356 we use our serializer to store
k8s objects, but there could still be raw pickled k8s objects in the database,
stored from earlier version, so we still compare them defensively here.
"""
if self.comparator:
return self.comparator(x, y)
else:
try:
return x == y
except AttributeError:
return False
class Interval(TypeDecorator):
"""Base class representing a time interval."""
impl = Text
cache_ok = True
attr_keys = {
datetime.timedelta: ("days", "seconds", "microseconds"),
relativedelta.relativedelta: (
"years",
"months",
"days",
"leapdays",
"hours",
"minutes",
"seconds",
"microseconds",
"year",
"month",
"day",
"hour",
"minute",
"second",
"microsecond",
),
}
def process_bind_param(self, value, dialect):
if isinstance(value, tuple(self.attr_keys)):
attrs = {key: getattr(value, key) for key in self.attr_keys[type(value)]}
return json.dumps({"type": type(value).__name__, "attrs": attrs})
return json.dumps(value)
def process_result_value(self, value, dialect):
if not value:
return value
data = json.loads(value)
if isinstance(data, dict):
type_map = {key.__name__: key for key in self.attr_keys}
return type_map[data["type"]](**data["attrs"])
return data
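# Hedged usage sketch: Interval stores timedelta/relativedelta values as JSON text
# and reconstructs the original type on the way out.
def _example_interval_roundtrip():
    col = Interval()
    raw = col.process_bind_param(datetime.timedelta(hours=1), dialect=None)
    # raw is a JSON string such as
    # {"type": "timedelta", "attrs": {"days": 0, "seconds": 3600, "microseconds": 0}}
    return col.process_result_value(raw, dialect=None)  # timedelta(seconds=3600)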
def skip_locked(session: Session) -> dict[str, Any]:
"""
Return kwargs for passing to `with_for_update()` suitable for the current DB engine version.
We do this as we document the fact that on DB engines that don't support this construct, we do not
support/recommend running HA scheduler. If a user ignores this and tries anyway everything will still
work, just slightly slower in some circumstances.
Specifically don't emit SKIP LOCKED for MySQL < 8, or MariaDB, neither of which support this construct
See https://jira.mariadb.org/browse/MDEV-13115
"""
dialect = session.bind.dialect
if dialect.name != "mysql" or dialect.supports_for_update_of:
return {"skip_locked": True}
else:
return {}
def nowait(session: Session) -> dict[str, Any]:
"""
Return kwargs for passing to `with_for_update()` suitable for the current DB engine version.
We do this as we document the fact that on DB engines that don't support this construct, we do not
support/recommend running HA scheduler. If a user ignores this and tries anyway everything will still
work, just slightly slower in some circumstances.
Specifically don't emit NOWAIT for MySQL < 8, or MariaDB, neither of which support this construct
See https://jira.mariadb.org/browse/MDEV-13115
"""
dialect = session.bind.dialect
if dialect.name != "mysql" or dialect.supports_for_update_of:
return {"nowait": True}
else:
return {}
def nulls_first(col, session: Session) -> ColumnElement:
"""Specify *NULLS FIRST* to the column ordering.
This is only done to Postgres, currently the only backend that supports it.
Other databases do not need it since NULL values are considered lower than
any other values, and appear first when the order is ASC (ascending).
"""
if session.bind.dialect.name == "postgresql":
return nullsfirst(col)
else:
return col
USE_ROW_LEVEL_LOCKING: bool = conf.getboolean("scheduler", "use_row_level_locking", fallback=True)
def with_row_locks(query: Query, session: Session, **kwargs) -> Query:
"""
Apply with_for_update to an SQLAlchemy query, if row level locking is in use.
:param query: An SQLAlchemy Query object
:param session: ORM Session
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
:return: updated query
"""
dialect = session.bind.dialect
# Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it.
if USE_ROW_LEVEL_LOCKING and (dialect.name != "mysql" or dialect.supports_for_update_of):
return query.with_for_update(**kwargs)
else:
return query
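# Hedged usage sketch: applying row-level locks to a scheduler-style query. The
# model and session are hypothetical; on MariaDB and MySQL < 8 this is a no-op.
def _example_with_row_locks(session, TaskInstance):
    query = session.query(TaskInstance).filter(TaskInstance.state == "queued")
    return with_row_locks(query, session=session, skip_locked=True).all()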
@contextlib.contextmanager
def lock_rows(query: Query, session: Session) -> Generator[None, None, None]:
"""Lock database rows during the context manager block.
This is a convenient method for ``with_row_locks`` when we don't need the
locked rows.
:meta private:
"""
locked_rows = with_row_locks(query, session).all()
yield
del locked_rows
class CommitProhibitorGuard:
"""Context manager class that powers prohibit_commit."""
expected_commit = False
def __init__(self, session: Session):
self.session = session
def _validate_commit(self, _):
if self.expected_commit:
self.expected_commit = False
return
raise RuntimeError("UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!")
def __enter__(self):
event.listen(self.session, "before_commit", self._validate_commit)
return self
def __exit__(self, *exc_info):
event.remove(self.session, "before_commit", self._validate_commit)
def commit(self):
"""
Commit the session.
This is the required way to commit when the guard is in scope
"""
self.expected_commit = True
self.session.commit()
def prohibit_commit(session):
"""
Return a context manager that will disallow any commit that isn't done via the context manager.
The aim of this is to ensure that transaction lifetime is strictly controlled which is especially
important in the core scheduler loop. Any commit on the session that is _not_ via this context manager
will result in RuntimeError
Example usage:
.. code:: python
with prohibit_commit(session) as guard:
# ... do something with session
guard.commit()
# This would throw an error
# session.commit()
"""
return CommitProhibitorGuard(session)
def is_lock_not_available_error(error: OperationalError):
"""Check if the Error is about not being able to acquire lock."""
# DB specific error codes:
# Postgres: 55P03
# MySQL: 3572, 'Statement aborted because lock(s) could not be acquired immediately and NOWAIT
# is set.'
# MySQL: 1205, 'Lock wait timeout exceeded; try restarting transaction
# (when NOWAIT isn't available)
db_err_code = getattr(error.orig, "pgcode", None) or error.orig.args[0]
# We could test if error.orig is an instance of
# psycopg2.errors.LockNotAvailable/_mysql_exceptions.OperationalError, but that involves
# importing it. Comparing the raw error codes directly does not require that import.
if db_err_code in ("55P03", 1205, 3572):
return True
return False
@overload
def tuple_in_condition(
columns: tuple[ColumnElement, ...],
collection: Iterable[Any],
) -> ColumnOperators:
...
@overload
def tuple_in_condition(
columns: tuple[ColumnElement, ...],
collection: Select,
*,
session: Session,
) -> ColumnOperators:
...
def tuple_in_condition(
columns: tuple[ColumnElement, ...],
collection: Iterable[Any] | Select,
*,
session: Session | None = None,
) -> ColumnOperators:
"""Generates a tuple-in-collection operator to use in ``.where()``.
For most SQL backends, this generates a simple ``([col, ...]) IN [condition]``
clause. This however does not work with MSSQL, where we need to expand to
``(c1 = v1a AND c2 = v2a ...) OR (c1 = v1b AND c2 = v2b ...) ...`` manually.
:meta private:
"""
if settings.engine.dialect.name != "mssql":
return tuple_(*columns).in_(collection)
if not isinstance(collection, Select):
rows = collection
elif session is None:
raise TypeError("session is required when passing in a subquery")
else:
rows = session.execute(collection)
clauses = [and_(*(c == v for c, v in zip(columns, values))) for values in rows]
if not clauses:
return false()
return or_(*clauses)
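# Hedged usage sketch: building a composite IN filter on hypothetical columns; keys
# is an iterable of (dag_id, task_id, run_id) tuples.
def _example_tuple_in_condition(session, TaskInstance, keys):
    condition = tuple_in_condition(
        (TaskInstance.dag_id, TaskInstance.task_id, TaskInstance.run_id), keys
    )
    return session.query(TaskInstance).filter(condition)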
@overload
def tuple_not_in_condition(
columns: tuple[ColumnElement, ...],
collection: Iterable[Any],
) -> ColumnOperators:
...
@overload
def tuple_not_in_condition(
columns: tuple[ColumnElement, ...],
collection: Select,
*,
session: Session,
) -> ColumnOperators:
...
def tuple_not_in_condition(
columns: tuple[ColumnElement, ...],
collection: Iterable[Any] | Select,
*,
session: Session | None = None,
) -> ColumnOperators:
"""Generates a tuple-not-in-collection operator to use in ``.where()``.
This is similar to ``tuple_in_condition`` except generating ``NOT IN``.
:meta private:
"""
if settings.engine.dialect.name != "mssql":
return tuple_(*columns).not_in(collection)
if not isinstance(collection, Select):
rows = collection
elif session is None:
raise TypeError("session is required when passing in a subquery")
else:
rows = session.execute(collection)
clauses = [or_(*(c != v for c, v in zip(columns, values))) for values in rows]
if not clauses:
return true()
return and_(*clauses)
| 20,675 | 33.006579 | 130 |
py
|
airflow
|
airflow-main/airflow/utils/serve_logs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serve logs process."""
from __future__ import annotations
import collections
import logging
import os
import socket
import gunicorn.app.base
from flask import Flask, abort, request, send_from_directory
from jwt.exceptions import (
ExpiredSignatureError,
ImmatureSignatureError,
InvalidAudienceError,
InvalidIssuedAtError,
InvalidSignatureError,
)
from setproctitle import setproctitle
from airflow.configuration import conf
from airflow.utils.docs import get_docs_url
from airflow.utils.jwt_signer import JWTSigner
logger = logging.getLogger(__name__)
def create_app():
flask_app = Flask(__name__, static_folder=None)
expiration_time_in_seconds = conf.getint("webserver", "log_request_clock_grace", fallback=30)
log_directory = os.path.expanduser(conf.get("logging", "BASE_LOG_FOLDER"))
signer = JWTSigner(
secret_key=conf.get("webserver", "secret_key"),
expiration_time_in_seconds=expiration_time_in_seconds,
audience="task-instance-logs",
)
# Prevent direct access to the logs port
@flask_app.before_request
def validate_pre_signed_url():
try:
auth = request.headers.get("Authorization")
if auth is None:
logger.warning("The Authorization header is missing: %s.", request.headers)
abort(403)
payload = signer.verify_token(auth)
token_filename = payload.get("filename")
request_filename = request.view_args["filename"]
if token_filename is None:
logger.warning("The payload does not contain 'filename' key: %s.", payload)
abort(403)
if token_filename != request_filename:
logger.warning(
"The payload log_relative_path key is different than the one in token:"
"Request path: %s. Token path: %s.",
request_filename,
token_filename,
)
abort(403)
except InvalidAudienceError:
logger.warning("Invalid audience for the request", exc_info=True)
abort(403)
except InvalidSignatureError:
logger.warning("The signature of the request was wrong", exc_info=True)
abort(403)
except ImmatureSignatureError:
logger.warning("The signature of the request was sent from the future", exc_info=True)
abort(403)
except ExpiredSignatureError:
logger.warning(
"The signature of the request has expired. Make sure that all components "
"in your system have synchronized clocks. "
"See more at %s",
get_docs_url("configurations-ref.html#secret-key"),
exc_info=True,
)
abort(403)
except InvalidIssuedAtError:
logger.warning(
"The request was issues in the future. Make sure that all components "
"in your system have synchronized clocks. "
"See more at %s",
get_docs_url("configurations-ref.html#secret-key"),
exc_info=True,
)
abort(403)
except Exception:
logger.warning("Unknown error", exc_info=True)
abort(403)
@flask_app.route("/log/<path:filename>")
def serve_logs_view(filename):
return send_from_directory(log_directory, filename, mimetype="application/json", as_attachment=False)
return flask_app
GunicornOption = collections.namedtuple("GunicornOption", ["key", "value"])
class StandaloneGunicornApplication(gunicorn.app.base.BaseApplication):
"""
Standalone Gunicorn application/server for use with any WSGI application.
Code inspired by an example from the Gunicorn documentation.
https://github.com/benoitc/gunicorn/blob/cf55d2cec277f220ebd605989ce78ad1bb553c46/examples/standalone_app.py
For details, about standalone gunicorn application, see:
https://docs.gunicorn.org/en/stable/custom.html
"""
def __init__(self, app, options=None):
self.options = options or []
self.application = app
super().__init__()
def load_config(self):
for option in self.options:
self.cfg.set(option.key.lower(), option.value)
def load(self):
return self.application
def serve_logs(port=None):
"""Serves logs generated by Worker."""
setproctitle("airflow serve-logs")
wsgi_app = create_app()
port = port or conf.getint("logging", "WORKER_LOG_SERVER_PORT")
    # If dual-stack is available and IPV6_V6ONLY is not enabled on the socket,
    # then binding to IPv6 will also bind to IPv4 automatically.
if getattr(socket, "has_dualstack_ipv6", lambda: False)():
bind_option = GunicornOption("bind", f"[::]:{port}")
else:
bind_option = GunicornOption("bind", f"0.0.0.0:{port}")
options = [bind_option, GunicornOption("workers", 2)]
StandaloneGunicornApplication(wsgi_app, options).run()
if __name__ == "__main__":
serve_logs()
| 5,912 | 35.726708 | 112 |
py
|
airflow
|
airflow-main/airflow/utils/setup_teardown.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from airflow.exceptions import AirflowException
if TYPE_CHECKING:
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.taskmixin import DependencyMixin
from airflow.models.xcom_arg import PlainXComArg
class BaseSetupTeardownContext:
"""Context manager for setup/teardown tasks.
:meta private:
"""
active: bool = False
context_map: dict[AbstractOperator | tuple[AbstractOperator], list[AbstractOperator]] = {}
_context_managed_setup_task: AbstractOperator | list[AbstractOperator] = []
_previous_context_managed_setup_task: list[AbstractOperator | list[AbstractOperator]] = []
_context_managed_teardown_task: AbstractOperator | list[AbstractOperator] = []
_previous_context_managed_teardown_task: list[AbstractOperator | list[AbstractOperator]] = []
@classmethod
def push_context_managed_setup_task(cls, task: AbstractOperator | list[AbstractOperator]):
if cls._context_managed_setup_task:
cls._previous_context_managed_setup_task.append(cls._context_managed_setup_task)
cls._context_managed_setup_task = task
@classmethod
def push_context_managed_teardown_task(cls, task: AbstractOperator | list[AbstractOperator]):
if cls._context_managed_teardown_task:
cls._previous_context_managed_teardown_task.append(cls._context_managed_teardown_task)
cls._context_managed_teardown_task = task
@classmethod
def pop_context_managed_setup_task(cls) -> AbstractOperator | list[AbstractOperator]:
old_setup_task = cls._context_managed_setup_task
if cls._previous_context_managed_setup_task:
cls._context_managed_setup_task = cls._previous_context_managed_setup_task.pop()
setup_task = cls._context_managed_setup_task
if setup_task and old_setup_task:
if isinstance(setup_task, list):
for task in setup_task:
task.set_downstream(old_setup_task)
else:
setup_task.set_downstream(old_setup_task)
else:
cls._context_managed_setup_task = []
return old_setup_task
@classmethod
def update_context_map(cls, task: DependencyMixin):
from airflow.models.abstractoperator import AbstractOperator
task_ = cast(AbstractOperator, task)
if task_.is_setup or task_.is_teardown:
return
ctx = cls.context_map
def _get_or_set_item(item):
if ctx.get(item) is None:
ctx[item] = [task_]
else:
ctx[item].append(task_)
if setup_task := cls.get_context_managed_setup_task():
if isinstance(setup_task, list):
_get_or_set_item(tuple(setup_task))
else:
_get_or_set_item(setup_task)
if teardown_task := cls.get_context_managed_teardown_task():
if isinstance(teardown_task, list):
_get_or_set_item(tuple(teardown_task))
else:
_get_or_set_item(teardown_task)
@classmethod
def pop_context_managed_teardown_task(cls) -> AbstractOperator | list[AbstractOperator]:
old_teardown_task = cls._context_managed_teardown_task
if cls._previous_context_managed_teardown_task:
cls._context_managed_teardown_task = cls._previous_context_managed_teardown_task.pop()
teardown_task = cls._context_managed_teardown_task
if teardown_task and old_teardown_task:
if isinstance(teardown_task, list):
for task in teardown_task:
task.set_upstream(old_teardown_task)
else:
teardown_task.set_upstream(old_teardown_task)
else:
cls._context_managed_teardown_task = []
return old_teardown_task
@classmethod
def get_context_managed_setup_task(cls) -> AbstractOperator | list[AbstractOperator]:
return cls._context_managed_setup_task
@classmethod
def get_context_managed_teardown_task(cls) -> AbstractOperator | list[AbstractOperator]:
return cls._context_managed_teardown_task
@classmethod
def push_setup_teardown_task(cls, operator: AbstractOperator | list[AbstractOperator]):
if isinstance(operator, list):
if operator[0].is_teardown:
cls._push_tasks(operator)
elif operator[0].is_setup:
cls._push_tasks(operator, setup=True)
elif operator.is_teardown:
cls._push_tasks(operator)
elif operator.is_setup:
cls._push_tasks(operator, setup=True)
cls.active = True
@classmethod
def _push_tasks(cls, operator: AbstractOperator | list[AbstractOperator], setup: bool = False):
if isinstance(operator, list):
upstream_tasks = operator[0].upstream_list
downstream_list = operator[0].downstream_list
if not all(task.is_setup == operator[0].is_setup for task in operator):
cls.error("All tasks in the list must be either setup or teardown tasks")
else:
upstream_tasks = operator.upstream_list
downstream_list = operator.downstream_list
if setup:
cls.push_context_managed_setup_task(operator)
if downstream_list:
cls.push_context_managed_teardown_task(list(downstream_list))
else:
cls.push_context_managed_teardown_task(operator)
if upstream_tasks:
cls.push_context_managed_setup_task(list(upstream_tasks))
@classmethod
def set_work_task_roots_and_leaves(cls):
if setup_task := cls.get_context_managed_setup_task():
if isinstance(setup_task, list):
setup_task = tuple(setup_task)
tasks_in_context = cls.context_map.get(setup_task, [])
if tasks_in_context:
roots = [task for task in tasks_in_context if not task.upstream_list]
if not roots:
setup_task >> tasks_in_context[0]
elif isinstance(setup_task, tuple):
for task in setup_task:
task >> roots
else:
setup_task >> roots
if teardown_task := cls.get_context_managed_teardown_task():
if isinstance(teardown_task, list):
teardown_task = tuple(teardown_task)
tasks_in_context = cls.context_map.get(teardown_task, [])
if tasks_in_context:
leaves = [task for task in tasks_in_context if not task.downstream_list]
if not leaves:
teardown_task << tasks_in_context[-1]
elif isinstance(teardown_task, tuple):
for task in teardown_task:
task << leaves
else:
teardown_task << leaves
setup_task = cls.pop_context_managed_setup_task()
teardown_task = cls.pop_context_managed_teardown_task()
if isinstance(setup_task, list):
setup_task = tuple(setup_task)
if isinstance(teardown_task, list):
teardown_task = tuple(teardown_task)
cls.active = False
cls.context_map.pop(setup_task, None)
cls.context_map.pop(teardown_task, None)
@classmethod
def error(cls, message: str):
cls.active = False
cls.context_map.clear()
cls._context_managed_setup_task = []
cls._context_managed_teardown_task = []
cls._previous_context_managed_setup_task = []
cls._previous_context_managed_teardown_task = []
raise ValueError(message)
class SetupTeardownContext(BaseSetupTeardownContext):
"""Context manager for setup and teardown tasks."""
@staticmethod
def add_task(task: AbstractOperator | PlainXComArg):
"""Add task to context manager."""
from airflow.models.xcom_arg import PlainXComArg
if not SetupTeardownContext.active:
raise AirflowException("Cannot add task to context outside the context manager.")
if isinstance(task, PlainXComArg):
task = task.operator
SetupTeardownContext.update_context_map(task)
| 9,174 | 41.674419 | 99 |
py
|
airflow
|
airflow-main/airflow/utils/email.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections.abc
import logging
import os
import smtplib
import warnings
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from typing import Any, Iterable
import re2
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException, AirflowException, RemovedInAirflow3Warning
log = logging.getLogger(__name__)
def send_email(
to: list[str] | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
dryrun: bool = False,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""
Send an email using the backend specified in the *EMAIL_BACKEND* configuration option.
:param to: A list or iterable of email addresses to send the email to.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to attach to the email.
:param dryrun: If *True*, the email will not actually be sent. Default: *False*.
:param cc: A string or iterable of strings containing email addresses to send a copy of the email to.
:param bcc: A string or iterable of strings containing email addresses to send a
blind carbon copy of the email to.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param conn_id: The connection ID to use for the backend. If not provided, the default connection
specified in the *EMAIL_CONN_ID* configuration option will be used.
:param custom_headers: A dictionary of additional headers to add to the MIME message.
No validations are run on these values, and they should be able to be encoded.
:param kwargs: Additional keyword arguments to pass to the backend.
"""
backend = conf.getimport("email", "EMAIL_BACKEND")
backend_conn_id = conn_id or conf.get("email", "EMAIL_CONN_ID")
from_email = conf.get("email", "from_email", fallback=None)
to_list = get_email_address_list(to)
to_comma_separated = ", ".join(to_list)
return backend(
to_comma_separated,
subject,
html_content,
files=files,
dryrun=dryrun,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
conn_id=backend_conn_id,
from_email=from_email,
custom_headers=custom_headers,
**kwargs,
)
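# Hedged usage sketch (the recipient address and attachment path below are made up):
# with an email backend configured in the [email] section, a caller invokes the
# helper directly and the backend receives a comma-separated recipient list:
#
#   send_email(
#       to=["[email protected]"],
#       subject="DAG run finished",
#       html_content="<b>All tasks succeeded</b>",
#       files=["/tmp/report.csv"],
#   )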
def send_email_smtp(
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
dryrun: bool = False,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str = "smtp_default",
from_email: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""Send an email with html content.
:param to: Recipient email address or list of addresses.
:param subject: Email subject.
:param html_content: Email body in HTML format.
:param files: List of file paths to attach to the email.
:param dryrun: If True, the email will not be sent, but all other actions will be performed.
:param cc: Carbon copy recipient email address or list of addresses.
:param bcc: Blind carbon copy recipient email address or list of addresses.
:param mime_subtype: MIME subtype of the email.
:param mime_charset: MIME charset of the email.
:param conn_id: Connection ID of the SMTP server.
:param from_email: Sender email address.
:param custom_headers: Dictionary of custom headers to include in the email.
:param kwargs: Additional keyword arguments.
>>> send_email('[email protected]', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
"""
smtp_mail_from = conf.get("smtp", "SMTP_MAIL_FROM")
if smtp_mail_from is not None:
mail_from = smtp_mail_from
else:
if from_email is None:
raise Exception(
"You should set from email - either by smtp/smtp_mail_from config or `from_email` parameter"
)
mail_from = from_email
msg, recipients = build_mime_message(
mail_from=mail_from,
to=to,
subject=subject,
html_content=html_content,
files=files,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
custom_headers=custom_headers,
)
send_mime_email(e_from=mail_from, e_to=recipients, mime_msg=msg, conn_id=conn_id, dryrun=dryrun)
def build_mime_message(
mail_from: str | None,
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
custom_headers: dict[str, Any] | None = None,
) -> tuple[MIMEMultipart, list[str]]:
"""
    Build a MIME message that can be used to send an email, and return the full list of recipients.
:param mail_from: Email address to set as the email's "From" field.
:param to: A string or iterable of strings containing email addresses to set as the email's "To" field.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to be attached to the email.
:param cc: A string or iterable of strings containing email addresses to set as the email's "CC" field.
:param bcc: A string or iterable of strings containing email addresses to set as the email's "BCC" field.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param custom_headers: Additional headers to add to the MIME message. No validations are run on these
values, and they should be able to be encoded.
:return: A tuple containing the email as a MIMEMultipart object and a list of recipient email addresses.
"""
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg["Subject"] = subject
msg["From"] = mail_from
msg["To"] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg["CC"] = ", ".join(cc)
recipients += cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients += bcc
msg["Date"] = formatdate(localtime=True)
mime_text = MIMEText(html_content, "html", mime_charset)
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as file:
part = MIMEApplication(file.read(), Name=basename)
part["Content-Disposition"] = f'attachment; filename="{basename}"'
part["Content-ID"] = f"<{basename}>"
msg.attach(part)
if custom_headers:
for header_key, header_value in custom_headers.items():
msg[header_key] = header_value
return msg, recipients
def send_mime_email(
e_from: str,
e_to: str | list[str],
mime_msg: MIMEMultipart,
conn_id: str = "smtp_default",
dryrun: bool = False,
) -> None:
"""
Send a MIME email.
:param e_from: The email address of the sender.
:param e_to: The email address or a list of email addresses of the recipient(s).
:param mime_msg: The MIME message to send.
:param conn_id: The ID of the SMTP connection to use.
:param dryrun: If True, the email will not be sent, but a log message will be generated.
"""
smtp_host = conf.get_mandatory_value("smtp", "SMTP_HOST")
smtp_port = conf.getint("smtp", "SMTP_PORT")
smtp_starttls = conf.getboolean("smtp", "SMTP_STARTTLS")
smtp_ssl = conf.getboolean("smtp", "SMTP_SSL")
smtp_retry_limit = conf.getint("smtp", "SMTP_RETRY_LIMIT")
smtp_timeout = conf.getint("smtp", "SMTP_TIMEOUT")
smtp_user = None
smtp_password = None
if conn_id is not None:
try:
from airflow.hooks.base import BaseHook
airflow_conn = BaseHook.get_connection(conn_id)
smtp_user = airflow_conn.login
smtp_password = airflow_conn.password
except AirflowException:
pass
if smtp_user is None or smtp_password is None:
warnings.warn(
"Fetching SMTP credentials from configuration variables will be deprecated in a future "
"release. Please set credentials using a connection instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
try:
smtp_user = conf.get("smtp", "SMTP_USER")
smtp_password = conf.get("smtp", "SMTP_PASSWORD")
except AirflowConfigException:
log.debug("No user/password found for SMTP, so logging in with no authentication.")
if not dryrun:
for attempt in range(1, smtp_retry_limit + 1):
log.info("Email alerting: attempt %s", str(attempt))
try:
smtp_conn = _get_smtp_connection(smtp_host, smtp_port, smtp_timeout, smtp_ssl)
except smtplib.SMTPServerDisconnected:
if attempt < smtp_retry_limit:
continue
raise
if smtp_starttls:
smtp_conn.starttls()
if smtp_user and smtp_password:
smtp_conn.login(smtp_user, smtp_password)
log.info("Sent an alert email to %s", e_to)
smtp_conn.sendmail(e_from, e_to, mime_msg.as_string())
smtp_conn.quit()
break
def get_email_address_list(addresses: str | Iterable[str]) -> list[str]:
"""
Returns a list of email addresses from the provided input.
:param addresses: A string or iterable of strings containing email addresses.
:return: A list of email addresses.
:raises TypeError: If the input is not a string or iterable of strings.
"""
if isinstance(addresses, str):
return _get_email_list_from_str(addresses)
elif isinstance(addresses, collections.abc.Iterable):
if not all(isinstance(item, str) for item in addresses):
raise TypeError("The items in your iterable must be strings.")
return list(addresses)
else:
raise TypeError(f"Unexpected argument type: Received '{type(addresses).__name__}'.")
def _get_smtp_connection(host: str, port: int, timeout: int, with_ssl: bool) -> smtplib.SMTP:
"""
Returns an SMTP connection to the specified host and port, with optional SSL encryption.
:param host: The hostname or IP address of the SMTP server.
:param port: The port number to connect to on the SMTP server.
:param timeout: The timeout in seconds for the connection.
:param with_ssl: Whether to use SSL encryption for the connection.
:return: An SMTP connection to the specified host and port.
"""
return (
smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
if with_ssl
else smtplib.SMTP(host=host, port=port, timeout=timeout)
)
def _get_email_list_from_str(addresses: str) -> list[str]:
"""
Extract a list of email addresses from a string.
The string can contain multiple email addresses separated
by any of the following delimiters: ',' or ';'.
:param addresses: A string containing one or more email addresses.
:return: A list of email addresses.
"""
pattern = r"\s*[,;]\s*"
return [address for address in re2.split(pattern, addresses)]
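# Illustrative behaviour of the address helpers above (addresses are made up):
#
#   get_email_address_list("[email protected]; [email protected]")    -> ["[email protected]", "[email protected]"]
#   get_email_address_list(["[email protected]", "[email protected]"])  -> ["[email protected]", "[email protected]"]
#   _get_email_list_from_str("[email protected],[email protected]")         -> ["[email protected]", "[email protected]"]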
| 12,775 | 37.251497 | 109 |
py
|
airflow
|
airflow-main/airflow/utils/net.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import socket
from functools import lru_cache
from airflow.configuration import conf
# patched version of socket.getfqdn() - see https://github.com/python/cpython/issues/49254
@lru_cache(maxsize=None)
def getfqdn(name=""):
"""
Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
"""
name = name.strip()
if not name or name == "0.0.0.0":
name = socket.gethostname()
try:
addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
except OSError:
pass
else:
for addr in addrs:
if addr[3]:
name = addr[3]
break
return name
def get_host_ip_address():
"""Fetch host ip address."""
return socket.gethostbyname(getfqdn())
def get_hostname():
"""Fetch the hostname using the callable from config or use `airflow.utils.net.getfqdn` as a fallback."""
return conf.getimport("core", "hostname_callable", fallback="airflow.utils.net.getfqdn")()
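# Illustrative calls (results depend on the local machine, hence only sketched):
#
#   getfqdn()               # fully qualified name of the local host, cached via lru_cache
#   get_host_ip_address()   # IP address resolved from that FQDN
#   get_hostname()          # same as getfqdn() unless [core] hostname_callable overrides it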
| 1,872 | 31.859649 | 109 |
py
|
airflow
|
airflow-main/airflow/utils/yaml.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Use libyaml for YAML dump/load operations where possible.
If libyaml is available we will use it -- it is significantly faster.
This module delegates all other properties to the yaml module, so it can be used as:
.. code-block:: python
import airflow.utils.yaml as yaml
And then be used directly in place of the normal python module.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, BinaryIO, TextIO, cast
if TYPE_CHECKING:
from yaml.error import MarkedYAMLError, YAMLError # noqa
def safe_load(stream: bytes | str | BinaryIO | TextIO) -> Any:
"""Like yaml.safe_load, but use the C libyaml for speed where we can."""
# delay import until use.
from yaml import load as orig
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore[assignment, no-redef]
return orig(stream, SafeLoader)
def dump(data: Any, **kwargs) -> str:
"""Like yaml.safe_dump, but use the C libyaml for speed where we can."""
# delay import until use.
from yaml import dump as orig
try:
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeDumper # type: ignore[assignment, no-redef]
return cast(str, orig(data, Dumper=SafeDumper, **kwargs))
def __getattr__(name):
# Delegate anything else to the yaml module
import yaml
if name == "FullLoader":
# Try to use CFullLoader by default
        return getattr(yaml, "CFullLoader", yaml.FullLoader)
return getattr(yaml, name)
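# Hedged round-trip example (the data is made up): this module is a drop-in for the
# regular yaml module, preferring the C implementations when they are installed:
#
#   import airflow.utils.yaml as yaml
#   text = yaml.dump({"a": 1, "b": [2, 3]})
#   assert yaml.safe_load(text) == {"a": 1, "b": [2, 3]}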
| 2,367 | 32.352113 | 84 |
py
|
airflow
|
airflow-main/airflow/utils/weekday.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Get the ISO standard day number of the week from a given day string."""
from __future__ import annotations
import enum
from typing import Iterable
@enum.unique
class WeekDay(enum.IntEnum):
"""Python Enum containing Days of the Week."""
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
@classmethod
def get_weekday_number(cls, week_day_str: str):
"""
Return the ISO Week Day Number for a Week Day.
:param week_day_str: Full Name of the Week Day. Example: "Sunday"
:return: ISO Week Day Number corresponding to the provided Weekday
"""
sanitized_week_day_str = week_day_str.upper()
if sanitized_week_day_str not in cls.__members__:
raise AttributeError(f'Invalid Week Day passed: "{week_day_str}"')
return cls[sanitized_week_day_str]
@classmethod
def convert(cls, day: str | WeekDay) -> int:
"""Helper function that returns the day number in the week."""
if isinstance(day, WeekDay):
return day
return cls.get_weekday_number(week_day_str=day)
@classmethod
def validate_week_day(
cls,
week_day: str | WeekDay | Iterable[str] | Iterable[WeekDay],
) -> set[int]:
"""Validate each item of iterable and create a set to ease compare of values."""
if not isinstance(week_day, Iterable):
if isinstance(week_day, WeekDay):
week_day = {week_day}
else:
raise TypeError(
f"Unsupported Type for week_day parameter: {type(week_day)}."
"Input should be iterable type:"
"str, set, list, dict or Weekday enum type"
)
if isinstance(week_day, str):
week_day = {week_day}
return {cls.convert(item) for item in week_day}
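# Illustrative results of the helpers above:
#
#   WeekDay.get_weekday_number("Sunday")                  -> WeekDay.SUNDAY (== 7)
#   WeekDay.validate_week_day("Sunday")                   -> {7}
#   WeekDay.validate_week_day({"MONDAY", WeekDay.FRIDAY}) -> {1, 5}
#   WeekDay.validate_week_day(WeekDay.TUESDAY)            -> {2}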
| 2,697 | 34.038961 | 88 |
py
|
airflow
|
airflow-main/airflow/utils/session.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
from functools import wraps
from inspect import signature
from typing import Callable, Generator, TypeVar, cast
from airflow import settings
from airflow.typing_compat import ParamSpec
@contextlib.contextmanager
def create_session() -> Generator[settings.SASession, None, None]:
"""Contextmanager that will create and teardown a session."""
Session = getattr(settings, "Session", None)
if Session is None:
raise RuntimeError("Session must be set before!")
session = Session()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
PS = ParamSpec("PS")
RT = TypeVar("RT")
def find_session_idx(func: Callable[PS, RT]) -> int:
"""Find session index in function call parameter."""
func_params = signature(func).parameters
try:
# func_params is an ordered dict -- this is the "recommended" way of getting the position
session_args_idx = tuple(func_params).index("session")
except ValueError:
raise ValueError(f"Function {func.__qualname__} has no `session` argument") from None
return session_args_idx
def provide_session(func: Callable[PS, RT]) -> Callable[PS, RT]:
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
"""
session_args_idx = find_session_idx(func)
@wraps(func)
def wrapper(*args, **kwargs) -> RT:
if "session" in kwargs or session_args_idx < len(args):
return func(*args, **kwargs)
else:
with create_session() as session:
return func(*args, session=session, **kwargs)
return wrapper
# A fake session to use in functions decorated by provide_session. This allows
# the 'session' argument to be of type Session instead of Session | None,
# making it easier to type hint the function body without dealing with the None
# case that can never happen at runtime.
NEW_SESSION: settings.SASession = cast(settings.SASession, None)
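# Hedged usage sketch (the function and query below are hypothetical): callers declare
# a ``session`` argument defaulting to NEW_SESSION and let the decorator manage the
# session when one is not passed explicitly:
#
#   @provide_session
#   def count_dag_runs(dag_id: str, session: settings.SASession = NEW_SESSION) -> int:
#       return session.query(DagRun).filter(DagRun.dag_id == dag_id).count()
#
#   count_dag_runs("example_dag")                    # wrapper creates and closes the session
#   count_dag_runs("example_dag", session=session)   # caller-supplied session is reused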
| 3,043 | 33.988506 | 97 |
py
|
airflow
|
airflow-main/airflow/utils/event_scheduler.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sched import scheduler
from typing import Callable
class EventScheduler(scheduler):
"""General purpose event scheduler."""
def call_regular_interval(
self,
delay: float,
action: Callable,
arguments=(),
kwargs={},
):
"""Helper to call a function at (roughly) a given interval."""
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self.enter(delay, 1, repeat, args, kwargs)
self.enter(delay, 1, repeat, arguments, kwargs)
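# Hedged usage sketch (the heartbeat action is made up): run() blocks and the action is
# re-queued after each invocation, so the effective period is the delay plus the
# action's own runtime:
#
#   scheduler = EventScheduler()
#   scheduler.call_regular_interval(60.0, lambda: print("heartbeat"))
#   scheduler.run()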
| 1,526 | 33.704545 | 75 |
py
|
airflow
|
airflow-main/airflow/utils/deprecation_tools.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
import importlib
import sys
import warnings
from types import ModuleType
def getattr_with_deprecation(imports: dict[str, str], module: str, name: str):
target_class_full_name = imports.get(name)
if not target_class_full_name:
raise AttributeError(f"The module `{module!r}` has no attribute `{name!r}`")
warnings.warn(
f"The `{module}.{name}` class is deprecated. Please use `{target_class_full_name!r}`.",
DeprecationWarning,
stacklevel=2,
)
new_module, new_class_name = target_class_full_name.rsplit(".", 1)
return getattr(importlib.import_module(new_module), new_class_name)
def add_deprecated_classes(module_imports: dict[str, dict[str, str]], package: str):
for module_name, imports in module_imports.items():
full_module_name = f"{package}.{module_name}"
module_type = ModuleType(full_module_name)
# Mypy is not able to derive the right function signature https://github.com/python/mypy/issues/2427
module_type.__getattr__ = functools.partial( # type: ignore[assignment]
getattr_with_deprecation, imports, full_module_name
)
sys.modules.setdefault(full_module_name, module_type)
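# Hedged usage sketch (the module and class names below are hypothetical): a package can
# keep an old import path alive while warning users to migrate:
#
#   add_deprecated_classes(
#       {"old_module": {"OldOperator": "airflow.providers.new.operators.NewOperator"}},
#       package="airflow.some_package",
#   )
#
# Afterwards, ``from airflow.some_package.old_module import OldOperator`` emits a
# DeprecationWarning and returns the class imported from its new location.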
| 2,047 | 41.666667 | 108 |
py
|
airflow
|
airflow-main/airflow/utils/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/utils/weight_rule.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
from airflow.compat.functools import cache
class WeightRule(str, Enum):
"""Weight rules."""
DOWNSTREAM = "downstream"
UPSTREAM = "upstream"
ABSOLUTE = "absolute"
@classmethod
def is_valid(cls, weight_rule: str) -> bool:
"""Check if weight rule is valid."""
return weight_rule in cls.all_weight_rules()
@classmethod
@cache
def all_weight_rules(cls) -> set[str]:
"""Returns all weight rules."""
return set(cls.__members__.values())
def __str__(self) -> str:
return self.value
| 1,406 | 30.266667 | 62 |
py
|
airflow
|
airflow-main/airflow/utils/hashlib_wrapper.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import hashlib
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from _typeshed import ReadableBuffer
from airflow import PY39
def md5(__string: ReadableBuffer = b"") -> hashlib._Hash:
"""
Safely allows calling the hashlib.md5 function when "usedforsecurity" is disabled in the configuration.
    :param __string: The data to hash. Defaults to an empty byte string.
:return: The hashed value.
"""
if PY39:
return hashlib.md5(__string, usedforsecurity=False) # type: ignore
return hashlib.md5(__string)
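# Illustrative call: the wrapper behaves like hashlib.md5 but, on Python 3.9+, marks the
# digest as not used for security purposes (relevant on FIPS-enabled systems):
#
#   md5(b"airflow").hexdigest()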
| 1,359 | 33.871795 | 107 |
py
|
airflow
|
airflow-main/airflow/utils/edgemodifier.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from airflow.models.taskmixin import DAGNode, DependencyMixin
from airflow.utils.task_group import TaskGroup
class EdgeModifier(DependencyMixin):
"""
Class that represents edge information to be added between two tasks/operators.
Has shorthand factory functions, like Label("hooray").
Current implementation supports
t1 >> Label("Success route") >> t2
        t2 << Label("Success route") << t1
Note that due to the potential for use in either direction, this waits
to make the actual connection between both sides until both are declared,
and will do so progressively if multiple ups/downs are added.
This and EdgeInfo are related - an EdgeModifier is the Python object you
use to add information to (potentially multiple) edges, and EdgeInfo
is the representation of the information for one specific edge.
"""
def __init__(self, label: str | None = None):
self.label = label
self._upstream: list[DependencyMixin] = []
self._downstream: list[DependencyMixin] = []
@property
def roots(self):
return self._downstream
@property
def leaves(self):
return self._upstream
@staticmethod
def _make_list(item_or_list: DependencyMixin | Sequence[DependencyMixin]) -> Sequence[DependencyMixin]:
if not isinstance(item_or_list, Sequence):
return [item_or_list]
return item_or_list
def _save_nodes(
self,
nodes: DependencyMixin | Sequence[DependencyMixin],
stream: list[DependencyMixin],
):
from airflow.models.xcom_arg import XComArg
for node in self._make_list(nodes):
if isinstance(node, (TaskGroup, XComArg, DAGNode)):
stream.append(node)
else:
raise TypeError(
f"Cannot use edge labels with {type(node).__name__}, "
f"only tasks, XComArg or TaskGroups"
)
def _convert_streams_to_task_groups(self):
"""
Convert a node to a TaskGroup or leave it as a DAGNode.
Requires both self._upstream and self._downstream.
To do this, we keep a set of group_ids seen among the streams. If we find that
the nodes are from the same TaskGroup, we will leave them as DAGNodes and not
convert them to TaskGroups
"""
from airflow.models.xcom_arg import XComArg
group_ids = set()
for node in [*self._upstream, *self._downstream]:
if isinstance(node, DAGNode) and node.task_group:
if node.task_group.is_root:
group_ids.add("root")
else:
group_ids.add(node.task_group.group_id)
elif isinstance(node, TaskGroup):
group_ids.add(node.group_id)
elif isinstance(node, XComArg):
if isinstance(node.operator, DAGNode) and node.operator.task_group:
if node.operator.task_group.is_root:
group_ids.add("root")
else:
group_ids.add(node.operator.task_group.group_id)
# If all nodes originate from the same TaskGroup, we will not convert them
if len(group_ids) != 1:
self._upstream = self._convert_stream_to_task_groups(self._upstream)
self._downstream = self._convert_stream_to_task_groups(self._downstream)
def _convert_stream_to_task_groups(self, stream: Sequence[DependencyMixin]) -> Sequence[DependencyMixin]:
return [
node.task_group
if isinstance(node, DAGNode) and node.task_group and not node.task_group.is_root
else node
for node in stream
]
def set_upstream(
self,
other: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""
Set the given task/list onto the upstream attribute, then attempt to resolve the relationship.
Providing this also provides << via DependencyMixin.
"""
self._save_nodes(other, self._upstream)
if self._upstream and self._downstream:
# Convert _upstream and _downstream to task_groups only after both are set
self._convert_streams_to_task_groups()
for node in self._downstream:
node.set_upstream(other, edge_modifier=self)
def set_downstream(
self,
other: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""
Set the given task/list onto the downstream attribute, then attempt to resolve the relationship.
Providing this also provides >> via DependencyMixin.
"""
self._save_nodes(other, self._downstream)
if self._upstream and self._downstream:
# Convert _upstream and _downstream to task_groups only after both are set
self._convert_streams_to_task_groups()
for node in self._upstream:
node.set_downstream(other, edge_modifier=self)
def update_relative(
self, other: DependencyMixin, upstream: bool = True, edge_modifier: EdgeModifier | None = None
) -> None:
"""Called if we're not the "main" side of a relationship; we still run the same logic, though."""
if upstream:
self.set_upstream(other)
else:
self.set_downstream(other)
def add_edge_info(self, dag, upstream_id: str, downstream_id: str):
"""
Adds or updates task info on the DAG for this specific pair of tasks.
Called either from our relationship trigger methods above, or directly
by set_upstream/set_downstream in operators.
"""
dag.set_edge_info(upstream_id, downstream_id, {"label": self.label})
def add_to_taskgroup(self, task_group: TaskGroup) -> None:
"""No-op, since we're not a task.
We only add tasks to TaskGroups and not EdgeModifiers, but we need
this to satisfy the interface.
:meta private:
"""
# Factory functions
def Label(label: str):
"""Creates an EdgeModifier that sets a human-readable label on the edge."""
return EdgeModifier(label=label)
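# Hedged usage sketch (the task names are hypothetical): inside a DAG definition,
#
#   extract >> Label("valid rows") >> load
#
# stores {"label": "valid rows"} as edge info between the two tasks, which the UI can
# then render on the corresponding graph edge.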
| 7,121 | 37.290323 | 109 |
py
|
airflow
|
airflow-main/airflow/utils/python_virtualenv.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for creating a virtual environment."""
from __future__ import annotations
import os
import sys
import warnings
import jinja2
from airflow.utils.decorators import remove_task_decorator as _remove_task_decorator
from airflow.utils.process_utils import execute_in_subprocess
def _generate_virtualenv_cmd(tmp_dir: str, python_bin: str, system_site_packages: bool) -> list[str]:
cmd = [sys.executable, "-m", "virtualenv", tmp_dir]
if system_site_packages:
cmd.append("--system-site-packages")
if python_bin is not None:
cmd.append(f"--python={python_bin}")
return cmd
def _generate_pip_install_cmd_from_file(
tmp_dir: str, requirements_file_path: str, pip_install_options: list[str]
) -> list[str]:
cmd = [f"{tmp_dir}/bin/pip", "install"] + pip_install_options + ["-r"]
return cmd + [requirements_file_path]
def _generate_pip_install_cmd_from_list(
tmp_dir: str, requirements: list[str], pip_install_options: list[str]
) -> list[str]:
cmd = [f"{tmp_dir}/bin/pip", "install"] + pip_install_options
return cmd + requirements
def remove_task_decorator(python_source: str, task_decorator_name: str) -> str:
warnings.warn(
"Import remove_task_decorator from airflow.utils.decorators instead",
DeprecationWarning,
stacklevel=2,
)
return _remove_task_decorator(python_source, task_decorator_name)
def prepare_virtualenv(
venv_directory: str,
python_bin: str,
system_site_packages: bool,
requirements: list[str] | None = None,
requirements_file_path: str | None = None,
pip_install_options: list[str] | None = None,
) -> str:
"""Creates a virtual environment and installs the additional python packages.
:param venv_directory: The path for directory where the environment will be created.
:param python_bin: Path to the Python executable.
:param system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
:param requirements: List of additional python packages.
    :param requirements_file_path: Path to the ``requirements.txt`` file.
    :param pip_install_options: A list of extra options to pass to ``pip install``.
    :return: Path to a binary file with Python in a virtual environment.
"""
if pip_install_options is None:
pip_install_options = []
virtualenv_cmd = _generate_virtualenv_cmd(venv_directory, python_bin, system_site_packages)
execute_in_subprocess(virtualenv_cmd)
if requirements is not None and requirements_file_path is not None:
raise Exception("Either requirements OR requirements_file_path has to be passed, but not both")
pip_cmd = None
if requirements is not None and len(requirements) != 0:
pip_cmd = _generate_pip_install_cmd_from_list(venv_directory, requirements, pip_install_options)
if requirements_file_path is not None and requirements_file_path:
pip_cmd = _generate_pip_install_cmd_from_file(
venv_directory, requirements_file_path, pip_install_options
)
if pip_cmd:
execute_in_subprocess(pip_cmd)
return f"{venv_directory}/bin/python"
def write_python_script(
jinja_context: dict,
filename: str,
render_template_as_native_obj: bool = False,
):
"""
Renders the python script to a file to execute in the virtual environment.
    :param jinja_context: The jinja context variables to unpack and replace with their placeholders in the
template file.
:param filename: The name of the file to dump the rendered script to.
:param render_template_as_native_obj: If ``True``, rendered Jinja template would be converted
to a native Python object
"""
template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname(__file__))
template_env: jinja2.Environment
if render_template_as_native_obj:
template_env = jinja2.nativetypes.NativeEnvironment(
loader=template_loader, undefined=jinja2.StrictUndefined
)
else:
template_env = jinja2.Environment(loader=template_loader, undefined=jinja2.StrictUndefined)
template = template_env.get_template("python_virtualenv_script.jinja2")
template.stream(**jinja_context).dump(filename)
| 5,002 | 38.085938 | 104 |
py
|