repo (string, length 2-99) | file (string, length 13-225) | code (string, length 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
airflow
|
airflow-main/airflow/models/abstractoperator.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import inspect
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Collection, Iterable, Iterator, Sequence
from sqlalchemy import select
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models.expandinput import NotFullyPopulated
from airflow.models.taskmixin import DAGNode, DependencyMixin
from airflow.template.templater import Templater
from airflow.utils.context import Context
from airflow.utils.log.secrets_masker import redact
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import skip_locked, with_row_locks
from airflow.utils.state import State, TaskInstanceState
from airflow.utils.task_group import MappedTaskGroup
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET, ArgNotSet
from airflow.utils.weight_rule import WeightRule
TaskStateChangeCallback = Callable[[Context], None]
if TYPE_CHECKING:
import jinja2 # Slow import.
from sqlalchemy.orm import Session
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.taskinstance import TaskInstance
from airflow.utils.task_group import TaskGroup
DEFAULT_OWNER: str = conf.get_mandatory_value("operators", "default_owner")
DEFAULT_POOL_SLOTS: int = 1
DEFAULT_PRIORITY_WEIGHT: int = 1
DEFAULT_QUEUE: str = conf.get_mandatory_value("operators", "default_queue")
DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST: bool = conf.getboolean(
"scheduler", "ignore_first_depends_on_past_by_default"
)
DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING: bool = False
DEFAULT_RETRIES: int = conf.getint("core", "default_task_retries", fallback=0)
DEFAULT_RETRY_DELAY: datetime.timedelta = datetime.timedelta(
seconds=conf.getint("core", "default_task_retry_delay", fallback=300)
)
MAX_RETRY_DELAY: int = conf.getint("core", "max_task_retry_delay", fallback=24 * 60 * 60)
DEFAULT_WEIGHT_RULE: WeightRule = WeightRule(
conf.get("core", "default_task_weight_rule", fallback=WeightRule.DOWNSTREAM)
)
DEFAULT_TRIGGER_RULE: TriggerRule = TriggerRule.ALL_SUCCESS
DEFAULT_TASK_EXECUTION_TIMEOUT: datetime.timedelta | None = conf.gettimedelta(
"core", "default_task_execution_timeout"
)
class NotMapped(Exception):
"""Raise if a task is neither mapped nor has any parent mapped groups."""
class AbstractOperator(Templater, DAGNode):
"""Common implementation for operators, including unmapped and mapped.
This base class is more about sharing implementations, not defining a common
interface. Unfortunately it's difficult to use this as the common base class
for typing due to BaseOperator carrying too much historical baggage.
The union type ``from airflow.models.operator import Operator`` is easier
to use for typing purposes.
:meta private:
"""
operator_class: type[BaseOperator] | dict[str, Any]
weight_rule: str
priority_weight: int
# Defines the operator level extra links.
operator_extra_links: Collection[BaseOperatorLink]
owner: str
task_id: str
outlets: list
inlets: list
trigger_rule: TriggerRule
_is_setup = False
_is_teardown = False
_on_failure_fail_dagrun = False
HIDE_ATTRS_FROM_UI: ClassVar[frozenset[str]] = frozenset(
(
"log",
"dag", # We show dag_id, don't need to show this too
"node_id", # Duplicates task_id
"task_group", # Doesn't have a useful repr, no point showing in UI
"inherits_from_empty_operator", # impl detail
# For compatibility with TG, for operators these are just the current task, no point showing
"roots",
"leaves",
# These lists are already shown via *_task_ids
"upstream_list",
"downstream_list",
# Not useful, implementation detail, already shown elsewhere
"global_operator_extra_link_dict",
"operator_extra_link_dict",
)
)
def get_dag(self) -> DAG | None:
raise NotImplementedError()
@property
def task_type(self) -> str:
raise NotImplementedError()
@property
def operator_name(self) -> str:
raise NotImplementedError()
@property
def inherits_from_empty_operator(self) -> bool:
raise NotImplementedError()
@property
def dag_id(self) -> str:
"""Returns dag id if it has one or an adhoc + owner."""
dag = self.get_dag()
if dag:
return dag.dag_id
return f"adhoc_{self.owner}"
@property
def node_id(self) -> str:
return self.task_id
@property
def is_setup(self):
"""
Whether the operator is a setup task.
:meta private:
"""
return self._is_setup
@is_setup.setter
def is_setup(self, value):
"""
Setter for is_setup property.
:meta private:
"""
if self.is_teardown is True and value is True:
raise ValueError(f"Cannot mark task '{self.task_id}' as setup; task is already a teardown.")
self._is_setup = value
@property
def is_teardown(self):
"""
Whether the operator is a teardown task.
:meta private:
"""
return self._is_teardown
@is_teardown.setter
def is_teardown(self, value):
"""
Setter for is_teardown property.
:meta private:
"""
if self.is_setup is True and value is True:
raise ValueError(f"Cannot mark task '{self.task_id}' as teardown; task is already a setup.")
self._is_teardown = value
@property
def on_failure_fail_dagrun(self):
"""
Whether the operator should fail the dagrun on failure.
:meta private:
"""
return self._on_failure_fail_dagrun
@on_failure_fail_dagrun.setter
def on_failure_fail_dagrun(self, value):
"""
Setter for on_failure_fail_dagrun property.
:meta private:
"""
if value is True and self.is_teardown is not True:
raise ValueError(
f"Cannot set task on_failure_fail_dagrun for "
f"'{self.task_id}' because it is not a teardown task."
)
self._on_failure_fail_dagrun = value
def as_setup(self):
self.is_setup = True
return self
def as_teardown(
self,
*,
setups: BaseOperator | Iterable[BaseOperator] | ArgNotSet = NOTSET,
on_failure_fail_dagrun=NOTSET,
):
self.is_teardown = True
if TYPE_CHECKING:
assert isinstance(self, BaseOperator) # is_teardown not supported for MappedOperator
self.trigger_rule = TriggerRule.ALL_DONE_SETUP_SUCCESS
if on_failure_fail_dagrun is not NOTSET:
self.on_failure_fail_dagrun = on_failure_fail_dagrun
if not isinstance(setups, ArgNotSet):
setups = [setups] if isinstance(setups, DependencyMixin) else setups
for s in setups:
s.is_setup = True
s >> self
return self
def get_direct_relative_ids(self, upstream: bool = False) -> set[str]:
"""Get direct relative IDs to the current task, upstream or downstream."""
if upstream:
return self.upstream_task_ids
return self.downstream_task_ids
def get_flat_relative_ids(self, *, upstream: bool = False) -> set[str]:
"""Get a flat set of relative IDs, upstream or downstream.
Will recurse each relative found in the direction specified.
:param upstream: Whether to look for upstream or downstream relatives.
"""
dag = self.get_dag()
if not dag:
return set()
relatives: set[str] = set()
# This is intentionally implemented as a loop, instead of calling
# get_direct_relative_ids() recursively, since Python has significant
# limitation on stack level, and a recursive implementation can blow up
# if a DAG contains very long routes.
task_ids_to_trace = self.get_direct_relative_ids(upstream)
while task_ids_to_trace:
task_ids_to_trace_next: set[str] = set()
for task_id in task_ids_to_trace:
if task_id in relatives:
continue
task_ids_to_trace_next.update(dag.task_dict[task_id].get_direct_relative_ids(upstream))
relatives.add(task_id)
task_ids_to_trace = task_ids_to_trace_next
return relatives
def get_flat_relatives(self, upstream: bool = False) -> Collection[Operator]:
"""Get a flat list of relatives, either upstream or downstream."""
dag = self.get_dag()
if not dag:
return set()
return [dag.task_dict[task_id] for task_id in self.get_flat_relative_ids(upstream=upstream)]
def get_upstreams_follow_setups(self) -> Iterable[Operator]:
"""All upstreams and, for each upstream setup, its respective teardowns."""
for task in self.get_flat_relatives(upstream=True):
yield task
if task.is_setup:
for t in task.downstream_list:
if t.is_teardown and not t == self:
yield t
def get_upstreams_only_setups_and_teardowns(self) -> Iterable[Operator]:
"""
Only *relevant* upstream setups and their teardowns.
This method is meant to be used when we are clearing the task (non-upstream) and we need
to add in the *relevant* setups and their teardowns.
Relevant in this case means the setup has a teardown that is downstream of ``self``.
"""
downstream_teardown_ids = {
x.task_id for x in self.get_flat_relatives(upstream=False) if x.is_teardown
}
for task in self.get_flat_relatives(upstream=True):
if not task.is_setup:
continue
if not task.downstream_task_ids.isdisjoint(downstream_teardown_ids):
yield task
for t in task.downstream_list:
if t.is_teardown and not t == self:
yield t
def _iter_all_mapped_downstreams(self) -> Iterator[MappedOperator | MappedTaskGroup]:
"""Return mapped nodes that are direct dependencies of the current task.
For now, this walks the entire DAG to find mapped nodes that have this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record all of a DAG node's downstream nodes instead.
Note that this does not guarantee the returned tasks actually use the
current task for task mapping, but only checks that those tasks are mapped
operators and are downstream of the current task.
To get a list of tasks that uses the current task for task mapping, use
:meth:`iter_mapped_dependants` instead.
"""
from airflow.models.mappedoperator import MappedOperator
from airflow.utils.task_group import TaskGroup
def _walk_group(group: TaskGroup) -> Iterable[tuple[str, DAGNode]]:
"""Recursively walk children in a task group.
This yields all direct children (including both tasks and task
groups), and all children of any task groups.
"""
for key, child in group.children.items():
yield key, child
if isinstance(child, TaskGroup):
yield from _walk_group(child)
dag = self.get_dag()
if not dag:
raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
for key, child in _walk_group(dag.task_group):
if key == self.node_id:
continue
if not isinstance(child, (MappedOperator, MappedTaskGroup)):
continue
if self.node_id in child.upstream_task_ids:
yield child
def iter_mapped_dependants(self) -> Iterator[MappedOperator | MappedTaskGroup]:
"""Return mapped nodes that depend on the current task the expansion.
For now, this walks the entire DAG to find mapped nodes that has this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record an DAG node's all downstream nodes instead.
"""
return (
downstream
for downstream in self._iter_all_mapped_downstreams()
if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
)
def iter_mapped_task_groups(self) -> Iterator[MappedTaskGroup]:
"""Return mapped task groups this task belongs to.
Groups are returned from the innermost to the outermost.
:meta private:
"""
parent = self.task_group
while parent is not None:
if isinstance(parent, MappedTaskGroup):
yield parent
parent = parent.task_group
def add_to_taskgroup(self, task_group: TaskGroup) -> None:
"""Add the task to the given task group.
:meta private:
"""
if self.node_id not in task_group.children:
task_group.add(self)
def get_closest_mapped_task_group(self) -> MappedTaskGroup | None:
"""Get the mapped task group "closest" to this task in the DAG.
:meta private:
"""
return next(self.iter_mapped_task_groups(), None)
def unmap(self, resolve: None | dict[str, Any] | tuple[Context, Session]) -> BaseOperator:
"""Get the "normal" operator from current abstract operator.
MappedOperator uses this to unmap itself based on the map index. A non-
mapped operator (i.e. BaseOperator subclass) simply returns itself.
:meta private:
"""
raise NotImplementedError()
@property
def priority_weight_total(self) -> int:
"""
Total priority weight for the task. It might include all upstream or downstream tasks.
Depending on the weight rule:
- WeightRule.ABSOLUTE - only own weight
- WeightRule.DOWNSTREAM - adds priority weight of all downstream tasks
- WeightRule.UPSTREAM - adds priority weight of all upstream tasks
"""
if self.weight_rule == WeightRule.ABSOLUTE:
return self.priority_weight
elif self.weight_rule == WeightRule.DOWNSTREAM:
upstream = False
elif self.weight_rule == WeightRule.UPSTREAM:
upstream = True
else:
upstream = False
dag = self.get_dag()
if dag is None:
return self.priority_weight
return self.priority_weight + sum(
dag.task_dict[task_id].priority_weight
for task_id in self.get_flat_relative_ids(upstream=upstream)
)
@cached_property
def operator_extra_link_dict(self) -> dict[str, Any]:
"""Returns dictionary of all extra links for the operator."""
op_extra_links_from_plugin: dict[str, Any] = {}
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.operator_extra_links is None:
raise AirflowException("Can't load operators")
for ope in plugins_manager.operator_extra_links:
if ope.operators and self.operator_class in ope.operators:
op_extra_links_from_plugin.update({ope.name: ope})
operator_extra_links_all = {link.name: link for link in self.operator_extra_links}
# Extra links defined in Plugins overrides operator links defined in operator
operator_extra_links_all.update(op_extra_links_from_plugin)
return operator_extra_links_all
@cached_property
def global_operator_extra_link_dict(self) -> dict[str, Any]:
"""Returns dictionary of all global extra links."""
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.global_operator_extra_links is None:
raise AirflowException("Can't load operators")
return {link.name: link for link in plugins_manager.global_operator_extra_links}
@cached_property
def extra_links(self) -> list[str]:
return list(set(self.operator_extra_link_dict).union(self.global_operator_extra_link_dict))
def get_extra_links(self, ti: TaskInstance, link_name: str) -> str | None:
"""For an operator, gets the URLs that the ``extra_links`` entry points to.
:meta private:
:raise ValueError: The error message of a ValueError will be passed on through to
the frontend to show up as a tooltip on the disabled link.
:param ti: The TaskInstance for the URL being searched for.
:param link_name: The name of the link we're looking for the URL for. Should be
one of the options specified in ``extra_links``.
"""
link: BaseOperatorLink | None = self.operator_extra_link_dict.get(link_name)
if not link:
link = self.global_operator_extra_link_dict.get(link_name)
if not link:
return None
parameters = inspect.signature(link.get_link).parameters
old_signature = all(name != "ti_key" for name, p in parameters.items() if p.kind != p.VAR_KEYWORD)
if old_signature:
return link.get_link(self.unmap(None), ti.dag_run.logical_date) # type: ignore[misc]
return link.get_link(self.unmap(None), ti_key=ti.key)
@cache
def get_parse_time_mapped_ti_count(self) -> int:
"""Number of mapped task instances that can be created on DAG run creation.
This only considers literal mapped arguments; when any non-literal values
are used for mapping, the count cannot be determined at parse time.
:raise NotFullyPopulated: If non-literal mapped arguments are encountered.
:raise NotMapped: If the operator is neither mapped, nor has any parent
mapped task groups.
:return: Total number of mapped TIs this task should have.
"""
group = self.get_closest_mapped_task_group()
if group is None:
raise NotMapped
return group.get_parse_time_mapped_ti_count()
def get_mapped_ti_count(self, run_id: str, *, session: Session) -> int:
"""Number of mapped TaskInstances that can be created at run time.
This considers both literal and non-literal mapped arguments, and the
result is therefore only available once all upstream tasks have finished. The
return value should be identical to ``parse_time_mapped_ti_count`` if
all mapped arguments are literal.
:raise NotFullyPopulated: If upstream tasks are not all complete yet.
:raise NotMapped: If the operator is neither mapped, nor has any parent
mapped task groups.
:return: Total number of mapped TIs this task should have.
"""
group = self.get_closest_mapped_task_group()
if group is None:
raise NotMapped
return group.get_mapped_ti_count(run_id, session=session)
def expand_mapped_task(self, run_id: str, *, session: Session) -> tuple[Sequence[TaskInstance], int]:
"""Create the mapped task instances for mapped task.
:raise NotMapped: If this task does not need expansion.
:return: The newly created mapped task instances (if any) in ascending
order by map index, and the maximum map index value.
"""
from sqlalchemy import func, or_
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
from airflow.models.taskinstance import TaskInstance
from airflow.settings import task_instance_mutation_hook
if not isinstance(self, (BaseOperator, MappedOperator)):
raise RuntimeError(f"cannot expand unrecognized operator type {type(self).__name__}")
try:
total_length: int | None = self.get_mapped_ti_count(run_id, session=session)
except NotFullyPopulated as e:
# It's possible that the upstream tasks are not yet done, but we
# don't have upstream of upstreams in partial DAGs (possible in the
# mini-scheduler), so we ignore this exception.
if not self.dag or not self.dag.partial:
self.log.error(
"Cannot expand %r for run %s; missing upstream values: %s",
self,
run_id,
sorted(e.missing),
)
total_length = None
state: TaskInstanceState | None = None
unmapped_ti: TaskInstance | None = session.scalars(
select(TaskInstance).where(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == -1,
or_(TaskInstance.state.in_(State.unfinished), TaskInstance.state.is_(None)),
)
).one_or_none()
all_expanded_tis: list[TaskInstance] = []
if unmapped_ti:
# The unmapped task instance still exists and is unfinished, i.e. we
# haven't tried to run it before.
if total_length is None:
# If the DAG is partial, it's likely that the upstream tasks
# are not done yet, so the task can't fail yet.
if not self.dag or not self.dag.partial:
unmapped_ti.state = TaskInstanceState.UPSTREAM_FAILED
elif total_length < 1:
# If the upstream maps this to a zero-length value, simply mark
# the unmapped task instance as SKIPPED (if needed).
self.log.info(
"Marking %s as SKIPPED since the map has %d values to expand",
unmapped_ti,
total_length,
)
unmapped_ti.state = TaskInstanceState.SKIPPED
else:
zero_index_ti_exists = (
session.scalar(
select(func.count(TaskInstance.task_id)).where(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == 0,
)
)
> 0
)
if not zero_index_ti_exists:
# Otherwise convert this into the first mapped index, and create
# TaskInstance for other indexes.
unmapped_ti.map_index = 0
self.log.debug("Updated in place to become %s", unmapped_ti)
all_expanded_tis.append(unmapped_ti)
session.flush()
else:
self.log.debug("Deleting the original task instance: %s", unmapped_ti)
session.delete(unmapped_ti)
state = unmapped_ti.state
if total_length is None or total_length < 1:
# Nothing to fixup.
indexes_to_map: Iterable[int] = ()
else:
# Only create "missing" ones.
current_max_mapping = session.scalar(
select(func.max(TaskInstance.map_index)).where(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
)
)
indexes_to_map = range(current_max_mapping + 1, total_length)
for index in indexes_to_map:
# TODO: Make more efficient with bulk_insert_mappings/bulk_save_mappings.
ti = TaskInstance(self, run_id=run_id, map_index=index, state=state)
self.log.debug("Expanding TIs upserted %s", ti)
task_instance_mutation_hook(ti)
ti = session.merge(ti)
ti.refresh_from_task(self) # session.merge() loses task information.
all_expanded_tis.append(ti)
# Coerce the None case to 0 -- these two are almost treated identically,
# except the unmapped ti (if exists) is marked to different states.
total_expanded_ti_count = total_length or 0
# Any (old) task instances with inapplicable indexes (>= the total
# number we need) are set to "REMOVED".
query = select(TaskInstance).where(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index >= total_expanded_ti_count,
)
query = with_row_locks(query, of=TaskInstance, session=session, **skip_locked(session=session))
to_update = session.scalars(query)
for ti in to_update:
ti.state = TaskInstanceState.REMOVED
session.flush()
return all_expanded_tis, total_expanded_ti_count - 1
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
"""Template all attributes listed in *self.template_fields*.
If the operator is mapped, this should return the unmapped, fully
rendered, and map-expanded operator. The mapped operator should not be
modified. However, *context* may be modified in-place to reference the
unmapped operator for template rendering.
If the operator is not mapped, this should modify the operator in-place.
"""
raise NotImplementedError()
def _render(self, template, context, dag: DAG | None = None):
if dag is None:
dag = self.get_dag()
return super()._render(template, context, dag=dag)
def get_template_env(self, dag: DAG | None = None) -> jinja2.Environment:
"""Get the template environment for rendering templates."""
if dag is None:
dag = self.get_dag()
return super().get_template_env(dag=dag)
@provide_session
def _do_render_template_fields(
self,
parent: Any,
template_fields: Iterable[str],
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set[int],
*,
session: Session = NEW_SESSION,
) -> None:
"""Override the base to use custom error logging."""
for attr_name in template_fields:
try:
value = getattr(parent, attr_name)
except AttributeError:
raise AttributeError(
f"{attr_name!r} is configured as a template field "
f"but {parent.task_type} does not have this attribute."
)
try:
if not value:
continue
except Exception:
# This may happen if the templated field points to a class which does not support `__bool__`,
# such as Pandas DataFrames:
# https://github.com/pandas-dev/pandas/blob/9135c3aaf12d26f857fcc787a5b64d521c51e379/pandas/core/generic.py#L1465
self.log.info(
"Unable to check if the value of type '%s' is False for task '%s', field '%s'.",
type(value).__name__,
self.task_id,
attr_name,
)
# We may still want to render custom classes which do not support __bool__
pass
try:
rendered_content = self.render_template(
value,
context,
jinja_env,
seen_oids,
)
except Exception:
value_masked = redact(name=attr_name, value=value)
self.log.exception(
"Exception rendering Jinja template for task '%s', field '%s'. Template: %r",
self.task_id,
attr_name,
value_masked,
)
raise
else:
setattr(parent, attr_name, rendered_content)
| 29,642 | 39.058108 | 129 |
py
|
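The ``as_setup()``/``as_teardown()`` methods in ``abstractoperator.py`` above only flip flags and wire dependencies; a minimal usage sketch follows (assumes Airflow 2.7+ with ``EmptyOperator`` available; the DAG and task ids are illustrative, not from the source).

```python
# Minimal sketch of AbstractOperator.as_setup()/as_teardown() usage. The DAG and
# task ids are made up. as_teardown(setups=create) marks `create` as a setup,
# adds the create >> cleanup edge, and switches the teardown's trigger rule to
# ALL_DONE_SETUP_SUCCESS, as implemented in the code above.
import datetime

from airflow import DAG
from airflow.operators.empty import EmptyOperator

with DAG(dag_id="setup_teardown_demo", start_date=datetime.datetime(2023, 1, 1), schedule=None):
    create = EmptyOperator(task_id="create_resource")
    work = EmptyOperator(task_id="do_work")
    cleanup = EmptyOperator(task_id="delete_resource")

    # as_setup()/as_teardown() return self, so they chain with >> directly.
    create.as_setup() >> work >> cleanup.as_teardown(setups=create)
```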
airflow
|
airflow-main/airflow/models/dataset.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from urllib.parse import urlsplit
import sqlalchemy_jsonfield
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
ForeignKeyConstraint,
Index,
Integer,
PrimaryKeyConstraint,
String,
Table,
text,
)
from sqlalchemy.orm import relationship
from airflow.datasets import Dataset
from airflow.models.base import Base, StringID
from airflow.settings import json
from airflow.utils import timezone
from airflow.utils.sqlalchemy import UtcDateTime
class DatasetModel(Base):
"""
A table to store datasets.
:param uri: a string that uniquely identifies the dataset
:param extra: JSON field for arbitrary extra info
"""
id = Column(Integer, primary_key=True, autoincrement=True)
uri = Column(
String(length=3000).with_variant(
String(
length=3000,
# latin1 allows for more indexed length in mysql
# and this field should only be ascii chars
collation="latin1_general_cs",
),
"mysql",
),
nullable=False,
)
extra = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={})
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False)
is_orphaned = Column(Boolean, default=False, nullable=False, server_default="0")
consuming_dags = relationship("DagScheduleDatasetReference", back_populates="dataset")
producing_tasks = relationship("TaskOutletDatasetReference", back_populates="dataset")
__tablename__ = "dataset"
__table_args__ = (
Index("idx_uri_unique", uri, unique=True),
{"sqlite_autoincrement": True}, # ensures PK values not reused
)
@classmethod
def from_public(cls, obj: Dataset) -> DatasetModel:
return cls(uri=obj.uri, extra=obj.extra)
def __init__(self, uri: str, **kwargs):
try:
uri.encode("ascii")
except UnicodeEncodeError:
raise ValueError("URI must be ascii")
parsed = urlsplit(uri)
if parsed.scheme and parsed.scheme.lower() == "airflow":
raise ValueError("Scheme `airflow` is reserved.")
super().__init__(uri=uri, **kwargs)
def __eq__(self, other):
if isinstance(other, (self.__class__, Dataset)):
return self.uri == other.uri
else:
return NotImplemented
def __hash__(self):
return hash(self.uri)
def __repr__(self):
return f"{self.__class__.__name__}(uri={self.uri!r}, extra={self.extra!r})"
class DagScheduleDatasetReference(Base):
"""References from a DAG to a dataset of which it is a consumer."""
dataset_id = Column(Integer, primary_key=True, nullable=False)
dag_id = Column(StringID(), primary_key=True, nullable=False)
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False)
dataset = relationship("DatasetModel", back_populates="consuming_dags")
queue_records = relationship(
"DatasetDagRunQueue",
primaryjoin="""and_(
DagScheduleDatasetReference.dataset_id == foreign(DatasetDagRunQueue.dataset_id),
DagScheduleDatasetReference.dag_id == foreign(DatasetDagRunQueue.target_dag_id),
)""",
cascade="all, delete, delete-orphan",
)
__tablename__ = "dag_schedule_dataset_reference"
__table_args__ = (
PrimaryKeyConstraint(dataset_id, dag_id, name="dsdr_pkey", mssql_clustered=True),
ForeignKeyConstraint(
(dataset_id,),
["dataset.id"],
name="dsdr_dataset_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
columns=(dag_id,),
refcolumns=["dag.dag_id"],
name="dsdr_dag_id_fkey",
ondelete="CASCADE",
),
)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.dataset_id == other.dataset_id and self.dag_id == other.dag_id
else:
return NotImplemented
def __hash__(self):
return hash(self.__mapper__.primary_key)
def __repr__(self):
args = []
for attr in [x.name for x in self.__mapper__.primary_key]:
args.append(f"{attr}={getattr(self, attr)!r}")
return f"{self.__class__.__name__}({', '.join(args)})"
class TaskOutletDatasetReference(Base):
"""References from a task to a dataset that it updates / produces."""
dataset_id = Column(Integer, primary_key=True, nullable=False)
dag_id = Column(StringID(), primary_key=True, nullable=False)
task_id = Column(StringID(), primary_key=True, nullable=False)
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False)
dataset = relationship("DatasetModel", back_populates="producing_tasks")
__tablename__ = "task_outlet_dataset_reference"
__table_args__ = (
ForeignKeyConstraint(
(dataset_id,),
["dataset.id"],
name="todr_dataset_fkey",
ondelete="CASCADE",
),
PrimaryKeyConstraint(dataset_id, dag_id, task_id, name="todr_pkey", mssql_clustered=True),
ForeignKeyConstraint(
columns=(dag_id,),
refcolumns=["dag.dag_id"],
name="todr_dag_id_fkey",
ondelete="CASCADE",
),
)
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.dataset_id == other.dataset_id
and self.dag_id == other.dag_id
and self.task_id == other.task_id
)
else:
return NotImplemented
def __hash__(self):
return hash(self.__mapper__.primary_key)
def __repr__(self):
args = []
for attr in [x.name for x in self.__mapper__.primary_key]:
args.append(f"{attr}={getattr(self, attr)!r}")
return f"{self.__class__.__name__}({', '.join(args)})"
class DatasetDagRunQueue(Base):
"""Model for storing dataset events that need processing."""
dataset_id = Column(Integer, primary_key=True, nullable=False)
target_dag_id = Column(StringID(), primary_key=True, nullable=False)
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
__tablename__ = "dataset_dag_run_queue"
__table_args__ = (
PrimaryKeyConstraint(dataset_id, target_dag_id, name="datasetdagrunqueue_pkey", mssql_clustered=True),
ForeignKeyConstraint(
(dataset_id,),
["dataset.id"],
name="ddrq_dataset_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
(target_dag_id,),
["dag.dag_id"],
name="ddrq_dag_fkey",
ondelete="CASCADE",
),
)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.dataset_id == other.dataset_id and self.target_dag_id == other.target_dag_id
else:
return NotImplemented
def __hash__(self):
return hash(self.__mapper__.primary_key)
def __repr__(self):
args = []
for attr in [x.name for x in self.__mapper__.primary_key]:
args.append(f"{attr}={getattr(self, attr)!r}")
return f"{self.__class__.__name__}({', '.join(args)})"
association_table = Table(
"dagrun_dataset_event",
Base.metadata,
Column("dag_run_id", ForeignKey("dag_run.id", ondelete="CASCADE"), primary_key=True),
Column("event_id", ForeignKey("dataset_event.id", ondelete="CASCADE"), primary_key=True),
Index("idx_dagrun_dataset_events_dag_run_id", "dag_run_id"),
Index("idx_dagrun_dataset_events_event_id", "event_id"),
)
class DatasetEvent(Base):
"""
A table to store dataset events.
:param dataset_id: reference to DatasetModel record
:param extra: JSON field for arbitrary extra info
:param source_task_id: the task_id of the TI which updated the dataset
:param source_dag_id: the dag_id of the TI which updated the dataset
:param source_run_id: the run_id of the TI which updated the dataset
:param source_map_index: the map_index of the TI which updated the dataset
:param timestamp: the time the event was logged
We use relationships instead of foreign keys so that dataset events are not deleted even
if the foreign key object is.
"""
id = Column(Integer, primary_key=True, autoincrement=True)
dataset_id = Column(Integer, nullable=False)
extra = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={})
source_task_id = Column(StringID(), nullable=True)
source_dag_id = Column(StringID(), nullable=True)
source_run_id = Column(StringID(), nullable=True)
source_map_index = Column(Integer, nullable=True, server_default=text("-1"))
timestamp = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
__tablename__ = "dataset_event"
__table_args__ = (
Index("idx_dataset_id_timestamp", dataset_id, timestamp),
{"sqlite_autoincrement": True}, # ensures PK values not reused
)
created_dagruns = relationship(
"DagRun",
secondary=association_table,
backref="consumed_dataset_events",
)
source_task_instance = relationship(
"TaskInstance",
primaryjoin="""and_(
DatasetEvent.source_dag_id == foreign(TaskInstance.dag_id),
DatasetEvent.source_run_id == foreign(TaskInstance.run_id),
DatasetEvent.source_task_id == foreign(TaskInstance.task_id),
DatasetEvent.source_map_index == foreign(TaskInstance.map_index),
)""",
viewonly=True,
lazy="select",
uselist=False,
)
source_dag_run = relationship(
"DagRun",
primaryjoin="""and_(
DatasetEvent.source_dag_id == foreign(DagRun.dag_id),
DatasetEvent.source_run_id == foreign(DagRun.run_id),
)""",
viewonly=True,
lazy="select",
uselist=False,
)
dataset = relationship(
DatasetModel,
primaryjoin="DatasetEvent.dataset_id == foreign(DatasetModel.id)",
viewonly=True,
lazy="select",
uselist=False,
)
@property
def uri(self):
return self.dataset.uri
def __repr__(self) -> str:
args = []
for attr in [
"id",
"dataset_id",
"extra",
"source_task_id",
"source_dag_id",
"source_run_id",
"source_map_index",
]:
args.append(f"{attr}={getattr(self, attr)!r}")
return f"{self.__class__.__name__}({', '.join(args)})"
| 11,826 | 33.887906 | 110 |
py
|
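The URI checks performed by ``DatasetModel.__init__`` in ``dataset.py`` above can be reproduced standalone; the helper name below is hypothetical, not part of Airflow's API.

```python
# Standalone sketch of the checks DatasetModel.__init__ performs on a dataset URI:
# it must be ASCII-only and must not use the reserved "airflow" scheme. The helper
# name validate_dataset_uri is hypothetical, not an Airflow API.
from urllib.parse import urlsplit


def validate_dataset_uri(uri: str) -> str:
    try:
        uri.encode("ascii")
    except UnicodeEncodeError:
        raise ValueError("URI must be ascii")
    if urlsplit(uri).scheme.lower() == "airflow":
        raise ValueError("Scheme `airflow` is reserved.")
    return uri


print(validate_dataset_uri("s3://my-bucket/daily/2023-01-01.csv"))  # accepted
# validate_dataset_uri("airflow://anything")  # would raise ValueError
```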
airflow
|
airflow-main/airflow/models/log.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import Column, Index, Integer, String, Text
from airflow.models.base import Base, StringID
from airflow.utils import timezone
from airflow.utils.sqlalchemy import UtcDateTime
class Log(Base):
"""Used to actively log events to the database."""
__tablename__ = "log"
id = Column(Integer, primary_key=True)
dttm = Column(UtcDateTime)
dag_id = Column(StringID())
task_id = Column(StringID())
map_index = Column(Integer)
event = Column(String(30))
execution_date = Column(UtcDateTime)
owner = Column(String(500))
extra = Column(Text)
__table_args__ = (
Index("idx_log_dag", dag_id),
Index("idx_log_dttm", dttm),
Index("idx_log_event", event),
)
def __init__(self, event, task_instance=None, owner=None, extra=None, **kwargs):
self.dttm = timezone.utcnow()
self.event = event
self.extra = extra
task_owner = None
if task_instance:
self.dag_id = task_instance.dag_id
self.task_id = task_instance.task_id
self.execution_date = task_instance.execution_date
self.map_index = task_instance.map_index
if getattr(task_instance, "task", None):
task_owner = task_instance.task.owner
if "task_id" in kwargs:
self.task_id = kwargs["task_id"]
if "dag_id" in kwargs:
self.dag_id = kwargs["dag_id"]
if kwargs.get("execution_date"):
self.execution_date = kwargs["execution_date"]
if "map_index" in kwargs:
self.map_index = kwargs["map_index"]
self.owner = owner or task_owner
def __str__(self) -> str:
return f"Log({self.event}, {self.task_id}, {self.owner}, {self.extra})"
| 2,600 | 33.223684 | 84 |
py
|
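For illustration, constructing a ``Log`` row directly shows how the constructor in ``log.py`` above fills fields from an optional task instance and lets explicit keyword arguments supply or override them (assumes a configured Airflow installation; all ids below are made up).

```python
# Illustrative only: Log() sets dttm itself, copies dag_id/task_id/execution_date/
# map_index from a task instance when one is given, and lets explicit kwargs such
# as dag_id/task_id supply those values instead. All ids here are made up.
from airflow.models.log import Log
from airflow.utils import timezone

entry = Log(
    event="cli_task_run",
    task_instance=None,              # no TaskInstance available in this sketch
    owner="airflow",
    extra='{"host": "worker-1"}',
    dag_id="example_dag",            # supplied via kwargs
    task_id="extract",
    execution_date=timezone.utcnow(),
)
print(entry)  # Log(cli_task_run, extract, airflow, {"host": "worker-1"})
```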
airflow
|
airflow-main/airflow/models/dagcode.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import os
import struct
from datetime import datetime
from typing import Collection, Iterable
from sqlalchemy import BigInteger, Column, String, Text, delete, select
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import literal
from airflow.exceptions import AirflowException, DagCodeNotFound
from airflow.models.base import Base
from airflow.utils import timezone
from airflow.utils.file import correct_maybe_zipped, open_maybe_zipped
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
log = logging.getLogger(__name__)
class DagCode(Base):
"""A table for DAGs code.
dag_code table contains code of DAG files synchronized by scheduler.
For details on dag serialization see SerializedDagModel
"""
__tablename__ = "dag_code"
fileloc_hash = Column(BigInteger, nullable=False, primary_key=True, autoincrement=False)
fileloc = Column(String(2000), nullable=False)
# The max length of fileloc exceeds the limit of indexing.
last_updated = Column(UtcDateTime, nullable=False)
source_code = Column(Text().with_variant(MEDIUMTEXT(), "mysql"), nullable=False)
def __init__(self, full_filepath: str, source_code: str | None = None):
self.fileloc = full_filepath
self.fileloc_hash = DagCode.dag_fileloc_hash(self.fileloc)
self.last_updated = timezone.utcnow()
self.source_code = source_code or DagCode.code(self.fileloc)
@provide_session
def sync_to_db(self, session: Session = NEW_SESSION) -> None:
"""Writes code into database.
:param session: ORM Session
"""
self.bulk_sync_to_db([self.fileloc], session)
@classmethod
@provide_session
def bulk_sync_to_db(cls, filelocs: Iterable[str], session: Session = NEW_SESSION) -> None:
"""Writes code in bulk into database.
:param filelocs: file paths of DAGs to sync
:param session: ORM Session
"""
filelocs = set(filelocs)
filelocs_to_hashes = {fileloc: DagCode.dag_fileloc_hash(fileloc) for fileloc in filelocs}
existing_orm_dag_codes = session.scalars(
select(DagCode)
.filter(DagCode.fileloc_hash.in_(filelocs_to_hashes.values()))
.with_for_update(of=DagCode)
).all()
if existing_orm_dag_codes:
existing_orm_dag_codes_map = {
orm_dag_code.fileloc: orm_dag_code for orm_dag_code in existing_orm_dag_codes
}
else:
existing_orm_dag_codes_map = {}
existing_orm_dag_codes_by_fileloc_hashes = {orm.fileloc_hash: orm for orm in existing_orm_dag_codes}
existing_orm_filelocs = {orm.fileloc for orm in existing_orm_dag_codes_by_fileloc_hashes.values()}
if not existing_orm_filelocs.issubset(filelocs):
conflicting_filelocs = existing_orm_filelocs.difference(filelocs)
hashes_to_filelocs = {DagCode.dag_fileloc_hash(fileloc): fileloc for fileloc in filelocs}
message = ""
for fileloc in conflicting_filelocs:
filename = hashes_to_filelocs[DagCode.dag_fileloc_hash(fileloc)]
message += (
f"Filename '{filename}' causes a hash collision in the "
f"database with '{fileloc}'. Please rename the file."
)
raise AirflowException(message)
existing_filelocs = {dag_code.fileloc for dag_code in existing_orm_dag_codes}
missing_filelocs = filelocs.difference(existing_filelocs)
for fileloc in missing_filelocs:
orm_dag_code = DagCode(fileloc, cls._get_code_from_file(fileloc))
session.add(orm_dag_code)
for fileloc in existing_filelocs:
current_version = existing_orm_dag_codes_by_fileloc_hashes[filelocs_to_hashes[fileloc]]
file_mod_time = datetime.fromtimestamp(
os.path.getmtime(correct_maybe_zipped(fileloc)), tz=timezone.utc
)
if file_mod_time > current_version.last_updated:
orm_dag_code = existing_orm_dag_codes_map[fileloc]
orm_dag_code.last_updated = file_mod_time
orm_dag_code.source_code = cls._get_code_from_file(orm_dag_code.fileloc)
session.merge(orm_dag_code)
@classmethod
@provide_session
def remove_deleted_code(cls, alive_dag_filelocs: Collection[str], session: Session = NEW_SESSION) -> None:
"""Deletes code not included in alive_dag_filelocs.
:param alive_dag_filelocs: file paths of alive DAGs
:param session: ORM Session
"""
alive_fileloc_hashes = [cls.dag_fileloc_hash(fileloc) for fileloc in alive_dag_filelocs]
log.debug("Deleting code from %s table ", cls.__tablename__)
session.execute(
delete(cls)
.where(cls.fileloc_hash.notin_(alive_fileloc_hashes), cls.fileloc.notin_(alive_dag_filelocs))
.execution_options(synchronize_session="fetch")
)
@classmethod
@provide_session
def has_dag(cls, fileloc: str, session: Session = NEW_SESSION) -> bool:
"""Checks a file exist in dag_code table.
:param fileloc: the file to check
:param session: ORM Session
"""
fileloc_hash = cls.dag_fileloc_hash(fileloc)
return (
session.scalars(select(literal(True)).where(cls.fileloc_hash == fileloc_hash)).one_or_none()
is not None
)
@classmethod
def get_code_by_fileloc(cls, fileloc: str) -> str:
"""Returns source code for a given fileloc.
:param fileloc: file path of a DAG
:return: source code as string
"""
return cls.code(fileloc)
@classmethod
def code(cls, fileloc) -> str:
"""Returns source code for this DagCode object.
:return: source code as string
"""
return cls._get_code_from_db(fileloc)
@staticmethod
def _get_code_from_file(fileloc):
with open_maybe_zipped(fileloc, "r") as f:
code = f.read()
return code
@classmethod
@provide_session
def _get_code_from_db(cls, fileloc, session: Session = NEW_SESSION) -> str:
dag_code = session.scalar(select(cls).where(cls.fileloc_hash == cls.dag_fileloc_hash(fileloc)))
if not dag_code:
raise DagCodeNotFound()
else:
code = dag_code.source_code
return code
@staticmethod
def dag_fileloc_hash(full_filepath: str) -> int:
"""Hashing file location for indexing.
:param full_filepath: full filepath of DAG file
:return: hashed full_filepath
"""
# Hashing is needed because fileloc can be up to 2000 characters (an Airflow convention),
# which exceeds the database index length limit.
import hashlib
# Only 7 bytes because MySQL BigInteger can hold only 8 bytes (signed).
return struct.unpack(">Q", hashlib.sha1(full_filepath.encode("utf-8")).digest()[-8:])[0] >> 8
| 7,951 | 37.980392 | 110 |
py
|
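The comment in ``DagCode.dag_fileloc_hash`` above is easier to follow with a standalone reproduction (stdlib only; the example path is made up).

```python
# Standalone reproduction of DagCode.dag_fileloc_hash: take the last 8 bytes of the
# SHA-1 digest, unpack them as an unsigned big-endian 64-bit integer, then shift
# right by 8 bits so only 7 bytes of entropy remain and the result always fits in
# a signed MySQL BIGINT. The example path is made up.
import hashlib
import struct


def fileloc_hash(full_filepath: str) -> int:
    digest = hashlib.sha1(full_filepath.encode("utf-8")).digest()
    return struct.unpack(">Q", digest[-8:])[0] >> 8


h = fileloc_hash("/opt/airflow/dags/example_dag.py")
assert 0 <= h < 2**56  # 7 bytes -> strictly below the signed 64-bit maximum
print(h)
```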
airflow
|
airflow-main/airflow/models/xcom.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections.abc
import contextlib
import datetime
import inspect
import itertools
import json
import logging
import pickle
import warnings
from functools import cached_property, wraps
from typing import TYPE_CHECKING, Any, Generator, Iterable, cast, overload
import attr
import pendulum
from sqlalchemy import (
Column,
ForeignKeyConstraint,
Index,
Integer,
LargeBinary,
PrimaryKeyConstraint,
String,
delete,
text,
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import Query, Session, reconstructor, relationship
from sqlalchemy.orm.exc import NoResultFound
from airflow import settings
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.configuration import conf
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils import timezone
from airflow.utils.helpers import exactly_one, is_container
from airflow.utils.json import XComDecoder, XComEncoder
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
# XCom constants below are needed for providers backward compatibility,
# which should import the constants directly after apache-airflow>=2.6.0
from airflow.utils.xcom import (
MAX_XCOM_SIZE, # noqa: F401
XCOM_RETURN_KEY,
)
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
class BaseXCom(Base, LoggingMixin):
"""Base class for XCom objects."""
__tablename__ = "xcom"
dag_run_id = Column(Integer(), nullable=False, primary_key=True)
task_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False, primary_key=True)
map_index = Column(Integer, primary_key=True, nullable=False, server_default=text("-1"))
key = Column(String(512, **COLLATION_ARGS), nullable=False, primary_key=True)
# Denormalized for easier lookup.
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
run_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
value = Column(LargeBinary)
timestamp = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
__table_args__ = (
# Ideally we should create a unique index over (key, dag_id, task_id, run_id),
# but it goes over MySQL's index length limit. So we instead index 'key'
# separately, and enforce uniqueness with DagRun.id instead.
Index("idx_xcom_key", key),
Index("idx_xcom_task_instance", dag_id, task_id, run_id, map_index),
PrimaryKeyConstraint(
"dag_run_id", "task_id", "map_index", "key", name="xcom_pkey", mssql_clustered=True
),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="xcom_task_instance_fkey",
ondelete="CASCADE",
),
)
dag_run = relationship(
"DagRun",
primaryjoin="BaseXCom.dag_run_id == foreign(DagRun.id)",
uselist=False,
lazy="joined",
passive_deletes="all",
)
execution_date = association_proxy("dag_run", "execution_date")
@reconstructor
def init_on_load(self):
"""
Called by the ORM after the instance has been loaded from the DB or otherwise reconstituted,
i.e. it automatically deserializes the XCom value when loading from the DB.
"""
self.value = self.orm_deserialize_value()
def __repr__(self):
if self.map_index < 0:
return f'<XCom "{self.key}" ({self.task_id} @ {self.run_id})>'
return f'<XCom "{self.key}" ({self.task_id}[{self.map_index}] @ {self.run_id})>'
@overload
@classmethod
def set(
cls,
key: str,
value: Any,
*,
dag_id: str,
task_id: str,
run_id: str,
map_index: int = -1,
session: Session = NEW_SESSION,
) -> None:
"""Store an XCom value.
A deprecated form of this function accepts ``execution_date`` instead of
``run_id``. The two arguments are mutually exclusive.
:param key: Key to store the XCom.
:param value: XCom value to store.
:param dag_id: DAG ID.
:param task_id: Task ID.
:param run_id: DAG run ID for the task.
:param map_index: Optional map index to assign XCom for a mapped task.
The default is ``-1`` (set for a non-mapped task).
:param session: Database session. If not given, a new session will be
created for this function.
"""
@overload
@classmethod
def set(
cls,
key: str,
value: Any,
task_id: str,
dag_id: str,
execution_date: datetime.datetime,
session: Session = NEW_SESSION,
) -> None:
"""Store an XCom value.
:sphinx-autoapi-skip:
"""
@classmethod
@provide_session
def set(
cls,
key: str,
value: Any,
task_id: str,
dag_id: str,
execution_date: datetime.datetime | None = None,
session: Session = NEW_SESSION,
*,
run_id: str | None = None,
map_index: int = -1,
) -> None:
"""Store an XCom value.
:sphinx-autoapi-skip:
"""
from airflow.models.dagrun import DagRun
if not exactly_one(execution_date is not None, run_id is not None):
raise ValueError(
f"Exactly one of run_id or execution_date must be passed. "
f"Passed execution_date={execution_date}, run_id={run_id}"
)
if run_id is None:
message = "Passing 'execution_date' to 'XCom.set()' is deprecated. Use 'run_id' instead."
warnings.warn(message, RemovedInAirflow3Warning, stacklevel=3)
try:
dag_run_id, run_id = (
session.query(DagRun.id, DagRun.run_id)
.filter(DagRun.dag_id == dag_id, DagRun.execution_date == execution_date)
.one()
)
except NoResultFound:
raise ValueError(f"DAG run not found on DAG {dag_id!r} at {execution_date}") from None
else:
dag_run_id = session.query(DagRun.id).filter_by(dag_id=dag_id, run_id=run_id).scalar()
if dag_run_id is None:
raise ValueError(f"DAG run not found on DAG {dag_id!r} with ID {run_id!r}")
# Seamlessly resolve LazyXComAccess to a list. This is intended to work
# as a "lazy list" to avoid pulling a ton of XComs unnecessarily, but if
# it's pushed into XCom, the user should be aware of the performance
# implications, and this avoids leaking the implementation detail.
if isinstance(value, LazyXComAccess):
warning_message = (
"Coercing mapped lazy proxy %s from task %s (DAG %s, run %s) "
"to list, which may degrade performance. Review resource "
"requirements for this operation, and call list() to suppress "
"this message. See Dynamic Task Mapping documentation for "
"more information about lazy proxy objects."
)
log.warning(
warning_message,
"return value" if key == XCOM_RETURN_KEY else f"value {key}",
task_id,
dag_id,
run_id or execution_date,
)
value = list(value)
value = cls.serialize_value(
value=value,
key=key,
task_id=task_id,
dag_id=dag_id,
run_id=run_id,
map_index=map_index,
)
# Remove duplicate XComs and insert a new one.
session.execute(
delete(cls).where(
cls.key == key,
cls.run_id == run_id,
cls.task_id == task_id,
cls.dag_id == dag_id,
cls.map_index == map_index,
)
)
new = cast(Any, cls)( # Work around Mypy complaining model not defining '__init__'.
dag_run_id=dag_run_id,
key=key,
value=value,
run_id=run_id,
task_id=task_id,
dag_id=dag_id,
map_index=map_index,
)
session.add(new)
session.flush()
@staticmethod
@provide_session
@internal_api_call
def get_value(
*,
ti_key: TaskInstanceKey,
key: str | None = None,
session: Session = NEW_SESSION,
) -> Any:
"""Retrieve an XCom value for a task instance.
This method returns "full" XCom values (i.e. uses ``deserialize_value``
from the XCom backend). Use :meth:`get_many` if you want the "shortened"
value via ``orm_deserialize_value``.
If there are no results, *None* is returned. If multiple XCom entries
match the criteria, an arbitrary one is returned.
:param ti_key: The TaskInstanceKey to look up the XCom for.
:param key: A key for the XCom. If provided, only XCom with matching
keys will be returned. Pass *None* (default) to remove the filter.
:param session: Database session. If not given, a new session will be
created for this function.
"""
return BaseXCom.get_one(
key=key,
task_id=ti_key.task_id,
dag_id=ti_key.dag_id,
run_id=ti_key.run_id,
map_index=ti_key.map_index,
session=session,
)
@overload
@staticmethod
@internal_api_call
def get_one(
*,
key: str | None = None,
dag_id: str | None = None,
task_id: str | None = None,
run_id: str | None = None,
map_index: int | None = None,
session: Session = NEW_SESSION,
) -> Any | None:
"""Retrieve an XCom value, optionally meeting certain criteria.
This method returns "full" XCom values (i.e. uses ``deserialize_value``
from the XCom backend). Use :meth:`get_many` if you want the "shortened"
value via ``orm_deserialize_value``.
If there are no results, *None* is returned. If multiple XCom entries
match the criteria, an arbitrary one is returned.
A deprecated form of this function accepts ``execution_date`` instead of
``run_id``. The two arguments are mutually exclusive.
.. seealso:: ``get_value()`` is a convenience function if you already
have a structured TaskInstance or TaskInstanceKey object available.
:param run_id: DAG run ID for the task.
:param dag_id: Only pull XCom from this DAG. Pass *None* (default) to
remove the filter.
:param task_id: Only XCom from task with matching ID will be pulled.
Pass *None* (default) to remove the filter.
:param map_index: Only XCom entries with the matching map index will be pulled.
Pass *None* (default) to remove the filter.
:param key: A key for the XCom. If provided, only XCom with matching
keys will be returned. Pass *None* (default) to remove the filter.
:param include_prior_dates: If *False* (default), only XCom from the
specified DAG run is returned. If *True*, the latest matching XCom is
returned regardless of the run it belongs to.
:param session: Database session. If not given, a new session will be
created for this function.
"""
@overload
@staticmethod
@internal_api_call
def get_one(
execution_date: datetime.datetime,
key: str | None = None,
task_id: str | None = None,
dag_id: str | None = None,
include_prior_dates: bool = False,
session: Session = NEW_SESSION,
) -> Any | None:
"""Retrieve an XCom value, optionally meeting certain criteria.
:sphinx-autoapi-skip:
"""
@staticmethod
@provide_session
@internal_api_call
def get_one(
execution_date: datetime.datetime | None = None,
key: str | None = None,
task_id: str | None = None,
dag_id: str | None = None,
include_prior_dates: bool = False,
session: Session = NEW_SESSION,
*,
run_id: str | None = None,
map_index: int | None = None,
) -> Any | None:
"""Retrieve an XCom value, optionally meeting certain criteria.
:sphinx-autoapi-skip:
"""
if not exactly_one(execution_date is not None, run_id is not None):
raise ValueError("Exactly one of run_id or execution_date must be passed")
if run_id:
query = BaseXCom.get_many(
run_id=run_id,
key=key,
task_ids=task_id,
dag_ids=dag_id,
map_indexes=map_index,
include_prior_dates=include_prior_dates,
limit=1,
session=session,
)
elif execution_date is not None:
message = "Passing 'execution_date' to 'XCom.get_one()' is deprecated. Use 'run_id' instead."
warnings.warn(message, RemovedInAirflow3Warning, stacklevel=3)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
query = BaseXCom.get_many(
execution_date=execution_date,
key=key,
task_ids=task_id,
dag_ids=dag_id,
map_indexes=map_index,
include_prior_dates=include_prior_dates,
limit=1,
session=session,
)
else:
raise RuntimeError("Should not happen?")
result = query.with_entities(BaseXCom.value).first()
if result:
return BaseXCom.deserialize_value(result)
return None
@overload
@staticmethod
def get_many(
*,
run_id: str,
key: str | None = None,
task_ids: str | Iterable[str] | None = None,
dag_ids: str | Iterable[str] | None = None,
map_indexes: int | Iterable[int] | None = None,
include_prior_dates: bool = False,
limit: int | None = None,
session: Session = NEW_SESSION,
) -> Query:
"""Composes a query to get one or more XCom entries.
This function returns an SQLAlchemy query of full XCom objects. If you
just want one stored value, use :meth:`get_one` instead.
A deprecated form of this function accepts ``execution_date`` instead of
``run_id``. The two arguments are mutually exclusive.
:param run_id: DAG run ID for the task.
:param key: A key for the XComs. If provided, only XComs with matching
keys will be returned. Pass *None* (default) to remove the filter.
:param task_ids: Only XComs from task with matching IDs will be pulled.
Pass *None* (default) to remove the filter.
:param dag_ids: Only pulls XComs from specified DAGs. Pass *None*
(default) to remove the filter.
:param map_indexes: Only XComs from matching map indexes will be pulled.
Pass *None* (default) to remove the filter.
:param include_prior_dates: If *False* (default), only XComs from the
specified DAG run are returned. If *True*, all matching XComs are
returned regardless of the run they belong to.
:param session: Database session. If not given, a new session will be
created for this function.
:param limit: Limit the number of XCom entries returned.
"""
@overload
@staticmethod
@internal_api_call
def get_many(
execution_date: datetime.datetime,
key: str | None = None,
task_ids: str | Iterable[str] | None = None,
dag_ids: str | Iterable[str] | None = None,
map_indexes: int | Iterable[int] | None = None,
include_prior_dates: bool = False,
limit: int | None = None,
session: Session = NEW_SESSION,
) -> Query:
"""Composes a query to get one or more XCom entries.
:sphinx-autoapi-skip:
"""
@staticmethod
@provide_session
@internal_api_call
def get_many(
execution_date: datetime.datetime | None = None,
key: str | None = None,
task_ids: str | Iterable[str] | None = None,
dag_ids: str | Iterable[str] | None = None,
map_indexes: int | Iterable[int] | None = None,
include_prior_dates: bool = False,
limit: int | None = None,
session: Session = NEW_SESSION,
*,
run_id: str | None = None,
) -> Query:
"""Composes a query to get one or more XCom entries.
:sphinx-autoapi-skip:
"""
from airflow.models.dagrun import DagRun
if not exactly_one(execution_date is not None, run_id is not None):
raise ValueError(
f"Exactly one of run_id or execution_date must be passed. "
f"Passed execution_date={execution_date}, run_id={run_id}"
)
if execution_date is not None:
message = "Passing 'execution_date' to 'XCom.get_many()' is deprecated. Use 'run_id' instead."
warnings.warn(message, RemovedInAirflow3Warning, stacklevel=3)
query = session.query(BaseXCom).join(BaseXCom.dag_run)
if key:
query = query.filter(BaseXCom.key == key)
if is_container(task_ids):
query = query.filter(BaseXCom.task_id.in_(task_ids))
elif task_ids is not None:
query = query.filter(BaseXCom.task_id == task_ids)
if is_container(dag_ids):
query = query.filter(BaseXCom.dag_id.in_(dag_ids))
elif dag_ids is not None:
query = query.filter(BaseXCom.dag_id == dag_ids)
if isinstance(map_indexes, range) and map_indexes.step == 1:
query = query.filter(
BaseXCom.map_index >= map_indexes.start, BaseXCom.map_index < map_indexes.stop
)
elif is_container(map_indexes):
query = query.filter(BaseXCom.map_index.in_(map_indexes))
elif map_indexes is not None:
query = query.filter(BaseXCom.map_index == map_indexes)
if include_prior_dates:
if execution_date is not None:
query = query.filter(DagRun.execution_date <= execution_date)
else:
dr = session.query(DagRun.execution_date).filter(DagRun.run_id == run_id).subquery()
query = query.filter(BaseXCom.execution_date <= dr.c.execution_date)
elif execution_date is not None:
query = query.filter(DagRun.execution_date == execution_date)
else:
query = query.filter(BaseXCom.run_id == run_id)
query = query.order_by(DagRun.execution_date.desc(), BaseXCom.timestamp.desc())
if limit:
return query.limit(limit)
return query
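# Illustrative usage sketch (hypothetical IDs). ``get_many`` returns a
# SQLAlchemy query of XCom rows, so callers can refine it further; values
# still have to be deserialized explicitly:
#
#     query = XCom.get_many(
#         run_id="manual__2024-01-01T00:00:00",
#         dag_ids="my_dag",
#         task_ids=["extract", "load"],
#     )
#     values = [XCom.deserialize_value(row) for row in query]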
@classmethod
@provide_session
def delete(cls, xcoms: XCom | Iterable[XCom], session: Session) -> None:
"""Delete one or multiple XCom entries."""
if isinstance(xcoms, XCom):
xcoms = [xcoms]
for xcom in xcoms:
if not isinstance(xcom, XCom):
raise TypeError(f"Expected XCom; received {xcom.__class__.__name__}")
session.delete(xcom)
session.commit()
@overload
@staticmethod
@internal_api_call
def clear(
*,
dag_id: str,
task_id: str,
run_id: str,
map_index: int | None = None,
session: Session = NEW_SESSION,
) -> None:
"""Clear all XCom data from the database for the given task instance.
A deprecated form of this function accepts ``execution_date`` instead of
``run_id``. The two arguments are mutually exclusive.
:param dag_id: ID of DAG to clear the XCom for.
:param task_id: ID of task to clear the XCom for.
:param run_id: ID of DAG run to clear the XCom for.
:param map_index: If given, only clear XCom from this particular mapped
task. The default ``None`` clears *all* XComs from the task.
:param session: Database session. If not given, a new session will be
created for this function.
"""
@overload
@staticmethod
@internal_api_call
def clear(
execution_date: pendulum.DateTime,
dag_id: str,
task_id: str,
session: Session = NEW_SESSION,
) -> None:
"""Clear all XCom data from the database for the given task instance.
:sphinx-autoapi-skip:
"""
@staticmethod
@provide_session
@internal_api_call
def clear(
execution_date: pendulum.DateTime | None = None,
dag_id: str | None = None,
task_id: str | None = None,
session: Session = NEW_SESSION,
*,
run_id: str | None = None,
map_index: int | None = None,
) -> None:
"""Clear all XCom data from the database for the given task instance.
:sphinx-autoapi-skip:
"""
from airflow.models import DagRun
# Given the historic argument order of this function (execution_date was the first argument),
# adding a new optional param means we need to add default values for everything :(
if dag_id is None:
raise TypeError("clear() missing required argument: dag_id")
if task_id is None:
raise TypeError("clear() missing required argument: task_id")
if not exactly_one(execution_date is not None, run_id is not None):
raise ValueError(
f"Exactly one of run_id or execution_date must be passed. "
f"Passed execution_date={execution_date}, run_id={run_id}"
)
if execution_date is not None:
message = "Passing 'execution_date' to 'XCom.clear()' is deprecated. Use 'run_id' instead."
warnings.warn(message, RemovedInAirflow3Warning, stacklevel=3)
run_id = (
session.query(DagRun.run_id)
.filter(DagRun.dag_id == dag_id, DagRun.execution_date == execution_date)
.scalar()
)
query = session.query(BaseXCom).filter_by(dag_id=dag_id, task_id=task_id, run_id=run_id)
if map_index is not None:
query = query.filter_by(map_index=map_index)
query.delete()
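# Illustrative usage sketch (hypothetical IDs): remove every XCom a task
# instance produced for one run, e.g. before re-running it manually:
#
#     XCom.clear(dag_id="my_dag", task_id="extract",
#                run_id="manual__2024-01-01T00:00:00")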
@staticmethod
def serialize_value(
value: Any,
*,
key: str | None = None,
task_id: str | None = None,
dag_id: str | None = None,
run_id: str | None = None,
map_index: int | None = None,
) -> Any:
"""Serialize XCom value to str or pickled object."""
if conf.getboolean("core", "enable_xcom_pickling"):
return pickle.dumps(value)
try:
return json.dumps(value, cls=XComEncoder).encode("UTF-8")
except (ValueError, TypeError) as ex:
log.error(
"%s."
" If you are using pickle instead of JSON for XCom,"
" then you need to enable pickle support for XCom"
" in your airflow config or make sure to decorate your"
" object with attr.",
ex,
)
raise
@staticmethod
def _deserialize_value(result: XCom, orm: bool) -> Any:
object_hook = None
if orm:
object_hook = XComDecoder.orm_object_hook
if result.value is None:
return None
if conf.getboolean("core", "enable_xcom_pickling"):
try:
return pickle.loads(result.value)
except pickle.UnpicklingError:
return json.loads(result.value.decode("UTF-8"), cls=XComDecoder, object_hook=object_hook)
else:
try:
return json.loads(result.value.decode("UTF-8"), cls=XComDecoder, object_hook=object_hook)
except (json.JSONDecodeError, UnicodeDecodeError):
return pickle.loads(result.value)
@staticmethod
def deserialize_value(result: XCom) -> Any:
"""Deserialize XCom value from str or pickle object."""
return BaseXCom._deserialize_value(result, False)
def orm_deserialize_value(self) -> Any:
"""
Deserialize method which is used to reconstruct ORM XCom object.
This method should be overridden in custom XCom backends to avoid
unnecessary request or other resource consuming operations when
creating XCom orm model. This is used when viewing XCom listing
in the webserver, for example.
"""
return BaseXCom._deserialize_value(self, True)
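# Minimal sketch of a custom XCom backend (the class and helper names are
# hypothetical, not part of Airflow). A backend typically overrides
# ``serialize_value``/``deserialize_value`` to keep large payloads out of the
# metadata database, and ``orm_deserialize_value`` to keep the webserver's
# XCom listing cheap:
#
#     class ObjectStoreXComBackend(BaseXCom):
#         @staticmethod
#         def serialize_value(value, *, key=None, task_id=None, dag_id=None,
#                             run_id=None, map_index=None):
#             uri = _upload_to_object_store(value)      # hypothetical helper
#             return BaseXCom.serialize_value(uri)
#
#         @staticmethod
#         def deserialize_value(result):
#             uri = BaseXCom.deserialize_value(result)
#             return _download_from_object_store(uri)   # hypothetical helper
#
#         def orm_deserialize_value(self):
#             # Only show the stored URI in the UI instead of fetching data.
#             return BaseXCom.deserialize_value(self)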
class _LazyXComAccessIterator(collections.abc.Iterator):
def __init__(self, cm: contextlib.AbstractContextManager[Query]) -> None:
self._cm = cm
self._entered = False
def __del__(self) -> None:
if self._entered:
self._cm.__exit__(None, None, None)
def __iter__(self) -> collections.abc.Iterator:
return self
def __next__(self) -> Any:
return XCom.deserialize_value(next(self._it))
@cached_property
def _it(self) -> collections.abc.Iterator:
self._entered = True
return iter(self._cm.__enter__())
@attr.define(slots=True)
class LazyXComAccess(collections.abc.Sequence):
"""Wrapper to lazily pull XCom with a sequence-like interface.
Note that since the session bound to the parent query may have died when we
actually access the sequence's content, we must create a new session
for every function call with ``with_session()``.
:meta private:
"""
_query: Query
_len: int | None = attr.ib(init=False, default=None)
@classmethod
def build_from_xcom_query(cls, query: Query) -> LazyXComAccess:
return cls(query=query.with_entities(XCom.value))
def __repr__(self) -> str:
return f"LazyXComAccess([{len(self)} items])"
def __str__(self) -> str:
return str(list(self))
def __eq__(self, other: Any) -> bool:
if isinstance(other, (list, LazyXComAccess)):
z = itertools.zip_longest(iter(self), iter(other), fillvalue=object())
return all(x == y for x, y in z)
return NotImplemented
def __getstate__(self) -> Any:
# We don't want to go to the trouble of serializing the entire Query
# object, including its filters, hints, etc. (plus SQLAlchemy does not
# provide a public API to inspect a query's contents). Converting the
# query into a SQL string is the best we can get. Theoretically we can
# do the same for count(), but I think it should be performant enough to
# calculate only that eagerly.
with self._get_bound_query() as query:
statement = query.statement.compile(
query.session.get_bind(),
# This inlines all the values into the SQL string to simplify
# cross-process communication as much as possible.
compile_kwargs={"literal_binds": True},
)
return (str(statement), query.count())
def __setstate__(self, state: Any) -> None:
statement, self._len = state
self._query = Query(XCom.value).from_statement(text(statement))
def __len__(self):
if self._len is None:
with self._get_bound_query() as query:
self._len = query.count()
return self._len
def __iter__(self):
return _LazyXComAccessIterator(self._get_bound_query())
def __getitem__(self, key):
if not isinstance(key, int):
raise ValueError("only support index access for now")
try:
with self._get_bound_query() as query:
r = query.offset(key).limit(1).one()
except NoResultFound:
raise IndexError(key) from None
return XCom.deserialize_value(r)
@contextlib.contextmanager
def _get_bound_query(self) -> Generator[Query, None, None]:
# Do we have a valid session already?
if self._query.session and self._query.session.is_active:
yield self._query
return
Session = getattr(settings, "Session", None)
if Session is None:
raise RuntimeError("Session must be set before!")
session = Session()
try:
yield self._query.with_session(session)
finally:
session.close()
def _patch_outdated_serializer(clazz: type[BaseXCom], params: Iterable[str]) -> None:
"""Patch a custom ``serialize_value`` to accept the modern signature.
To give custom XCom backends more flexibility with how they store values, we
now forward all params passed to ``XCom.set`` to ``XCom.serialize_value``.
In order to maintain compatibility with custom XCom backends written with
the old signature, we check the signature and, if necessary, patch with a
method that ignores kwargs the backend does not accept.
"""
old_serializer = clazz.serialize_value
@wraps(old_serializer)
def _shim(**kwargs):
kwargs = {k: kwargs.get(k) for k in params}
warnings.warn(
f"Method `serialize_value` in XCom backend {XCom.__name__} is using outdated signature and"
f"must be updated to accept all params in `BaseXCom.set` except `session`. Support will be "
f"removed in a future release.",
RemovedInAirflow3Warning,
)
return old_serializer(**kwargs)
clazz.serialize_value = _shim # type: ignore[assignment]
def _get_function_params(function) -> list[str]:
"""
Returns the list of parameter names of a function.
:param function: The function to inspect
"""
parameters = inspect.signature(function).parameters
bound_arguments = [
name for name, p in parameters.items() if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
]
return bound_arguments
def resolve_xcom_backend() -> type[BaseXCom]:
"""Resolves custom XCom class.
Confirms that custom XCom class extends the BaseXCom.
Compares the function signature of the custom XCom serialize_value to the base XCom serialize_value.
"""
clazz = conf.getimport("core", "xcom_backend", fallback=f"airflow.models.xcom.{BaseXCom.__name__}")
if not clazz:
return BaseXCom
if not issubclass(clazz, BaseXCom):
raise TypeError(
f"Your custom XCom class `{clazz.__name__}` is not a subclass of `{BaseXCom.__name__}`."
)
base_xcom_params = _get_function_params(BaseXCom.serialize_value)
xcom_params = _get_function_params(clazz.serialize_value)
if not set(base_xcom_params) == set(xcom_params):
_patch_outdated_serializer(clazz=clazz, params=xcom_params)
return clazz
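# The backend class is selected through the ``[core] xcom_backend`` option
# (or the ``AIRFLOW__CORE__XCOM_BACKEND`` environment variable). A sketch,
# assuming a custom backend lives at ``my_company.xcom.ObjectStoreXComBackend``:
#
#     AIRFLOW__CORE__XCOM_BACKEND=my_company.xcom.ObjectStoreXComBackend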
if TYPE_CHECKING:
XCom = BaseXCom # Hack to avoid Mypy "Variable 'XCom' is not valid as a type".
else:
XCom = resolve_xcom_backend()
| 32,287 | 35.900571 | 109 |
py
|
airflow
|
airflow-main/airflow/models/taskmixin.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Iterable, Sequence
import pendulum
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.serialization.enums import DagAttributeTypes
from airflow.utils.setup_teardown import SetupTeardownContext
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
from logging import Logger
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.models.operator import Operator
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.task_group import TaskGroup
class DependencyMixin:
"""Mixing implementing common dependency setting methods methods like >> and <<."""
@property
def roots(self) -> Sequence[DependencyMixin]:
"""
List of root nodes -- ones with no upstream dependencies,
a.k.a. the "start" of this sub-graph.
"""
raise NotImplementedError()
@property
def leaves(self) -> Sequence[DependencyMixin]:
"""
List of leaf nodes -- ones with only upstream dependencies,
a.k.a. the "end" of this sub-graph.
"""
raise NotImplementedError()
@abstractmethod
def set_upstream(
self, other: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None
):
"""Set a task or a task list to be directly upstream from the current task."""
raise NotImplementedError()
@abstractmethod
def set_downstream(
self, other: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None
):
"""Set a task or a task list to be directly downstream from the current task."""
raise NotImplementedError()
def as_setup(self) -> DependencyMixin:
"""Mark a task as setup task."""
raise NotImplementedError()
def as_teardown(
self,
*,
setups: BaseOperator | Iterable[BaseOperator] | ArgNotSet = NOTSET,
on_failure_fail_dagrun=NOTSET,
) -> DependencyMixin:
"""Mark a task as teardown and set its setups as direct relatives."""
raise NotImplementedError()
def update_relative(
self, other: DependencyMixin, upstream: bool = True, edge_modifier: EdgeModifier | None = None
) -> None:
"""
Update relationship information about another TaskMixin. Default is no-op.
Override if necessary.
"""
def __lshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implements Task << Task."""
self.set_upstream(other)
self.set_setup_teardown_ctx_dependencies(other)
self.set_taskgroup_ctx_dependencies(other)
return other
def __rshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implements Task >> Task."""
self.set_downstream(other)
self.set_setup_teardown_ctx_dependencies(other)
self.set_taskgroup_ctx_dependencies(other)
return other
def __rrshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Called for Task >> [Task] because list don't have __rshift__ operators."""
self.__lshift__(other)
return self
def __rlshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Called for Task << [Task] because list don't have __lshift__ operators."""
self.__rshift__(other)
return self
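# Illustrative usage sketch (hypothetical task names). The shift operators map
# onto set_downstream/set_upstream, and a list on the left-hand side falls back
# to the reflected variants defined above:
#
#     extract >> transform >> load        # transform runs after extract, etc.
#     [clean_a, clean_b] >> join          # handled by join.__rrshift__
#     [clean_a, clean_b] << fan_out       # handled by fan_out.__rlshift__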
@abstractmethod
def add_to_taskgroup(self, task_group: TaskGroup) -> None:
"""Add the task to the given task group."""
raise NotImplementedError()
@classmethod
def _iter_references(cls, obj: Any) -> Iterable[tuple[DependencyMixin, str]]:
from airflow.models.baseoperator import AbstractOperator
from airflow.utils.mixins import ResolveMixin
if isinstance(obj, AbstractOperator):
yield obj, "operator"
elif isinstance(obj, ResolveMixin):
yield from obj.iter_references()
elif isinstance(obj, Sequence):
for o in obj:
yield from cls._iter_references(o)
def set_setup_teardown_ctx_dependencies(self, other: DependencyMixin | Sequence[DependencyMixin]):
if not SetupTeardownContext.active:
return
for op, _ in self._iter_references([self, other]):
SetupTeardownContext.update_context_map(op)
def set_taskgroup_ctx_dependencies(self, other: DependencyMixin | Sequence[DependencyMixin]):
from airflow.utils.task_group import TaskGroupContext
if not TaskGroupContext.active:
return
task_group = TaskGroupContext.get_current_task_group(None)
for op, _ in self._iter_references([self, other]):
if task_group:
op.add_to_taskgroup(task_group)
class TaskMixin(DependencyMixin):
"""Mixin to provide task-related things.
:meta private:
"""
def __init_subclass__(cls) -> None:
warnings.warn(
f"TaskMixin has been renamed to DependencyMixin, please update {cls.__name__}",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
return super().__init_subclass__()
class DAGNode(DependencyMixin, metaclass=ABCMeta):
"""
A base class for a node in the graph of a workflow -- an Operator or a Task Group, either mapped or
unmapped.
"""
dag: DAG | None = None
task_group: TaskGroup | None = None
"""The task_group that contains this node"""
@property
@abstractmethod
def node_id(self) -> str:
raise NotImplementedError()
@property
def label(self) -> str | None:
tg = self.task_group
if tg and tg.node_id and tg.prefix_group_id:
# "task_group_id.task_id" -> "task_id"
return self.node_id[len(tg.node_id) + 1 :]
return self.node_id
start_date: pendulum.DateTime | None
end_date: pendulum.DateTime | None
upstream_task_ids: set[str]
downstream_task_ids: set[str]
def has_dag(self) -> bool:
return self.dag is not None
@property
def dag_id(self) -> str:
"""Returns dag id if it has one or an adhoc/meaningless ID."""
if self.dag:
return self.dag.dag_id
return "_in_memory_dag_"
@property
def log(self) -> Logger:
raise NotImplementedError()
@property
@abstractmethod
def roots(self) -> Sequence[DAGNode]:
raise NotImplementedError()
@property
@abstractmethod
def leaves(self) -> Sequence[DAGNode]:
raise NotImplementedError()
def _set_relatives(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
upstream: bool = False,
edge_modifier: EdgeModifier | None = None,
) -> None:
"""Sets relatives for the task or task list."""
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
if not isinstance(task_or_task_list, Sequence):
task_or_task_list = [task_or_task_list]
task_list: list[Operator] = []
for task_object in task_or_task_list:
task_object.update_relative(self, not upstream, edge_modifier=edge_modifier)
relatives = task_object.leaves if upstream else task_object.roots
for task in relatives:
if not isinstance(task, (BaseOperator, MappedOperator)):
raise AirflowException(
f"Relationships can only be set between Operators; received {task.__class__.__name__}"
)
task_list.append(task)
# relationships can only be set if the tasks share a single DAG. Tasks
# without a DAG are assigned to that DAG.
dags: set[DAG] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
if len(dags) > 1:
raise AirflowException(f"Tried to set relationships between tasks in more than one DAG: {dags}")
elif len(dags) == 1:
dag = dags.pop()
else:
raise AirflowException(
f"Tried to create relationships between tasks that don't have DAGs yet. "
f"Set the DAG for at least one task and try again: {[self, *task_list]}"
)
if not self.has_dag():
# If this task does not yet have a dag, add it to the same dag as the other task.
self.dag = dag
for task in task_list:
if dag and not task.has_dag():
# If the other task does not yet have a dag, add it to the same dag as this task.
dag.add_task(task)
if upstream:
task.downstream_task_ids.add(self.node_id)
self.upstream_task_ids.add(task.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, task.node_id, self.node_id)
else:
self.downstream_task_ids.add(task.node_id)
task.upstream_task_ids.add(self.node_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, self.node_id, task.node_id)
def set_downstream(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
) -> None:
"""Set a node (or nodes) to be directly downstream from the current node."""
self._set_relatives(task_or_task_list, upstream=False, edge_modifier=edge_modifier)
def set_upstream(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
) -> None:
"""Set a node (or nodes) to be directly upstream from the current node."""
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
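# Equivalent explicit form of the >> / << operators (hypothetical task names):
#
#     extract.set_downstream(transform)   # same as: extract >> transform
#     load.set_upstream(transform)        # same as: load << transform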
@property
def downstream_list(self) -> Iterable[Operator]:
"""List of nodes directly downstream."""
if not self.dag:
raise AirflowException(f"Operator {self} has not been assigned to a DAG yet")
return [self.dag.get_task(tid) for tid in self.downstream_task_ids]
@property
def upstream_list(self) -> Iterable[Operator]:
"""List of nodes directly upstream."""
if not self.dag:
raise AirflowException(f"Operator {self} has not been assigned to a DAG yet")
return [self.dag.get_task(tid) for tid in self.upstream_task_ids]
def get_direct_relative_ids(self, upstream: bool = False) -> set[str]:
"""
Get set of the direct relative ids to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_task_ids
else:
return self.downstream_task_ids
def get_direct_relatives(self, upstream: bool = False) -> Iterable[DAGNode]:
"""
Get list of the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
"""This is used by TaskGroupSerialization to serialize a task group's content."""
raise NotImplementedError()
| 12,294 | 36.033133 | 110 |
py
|
airflow
|
airflow-main/airflow/models/variable.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import logging
from typing import Any
from sqlalchemy import Boolean, Column, Integer, String, Text, delete
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.orm import Session, declared_attr, reconstructor, synonym
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.configuration import ensure_secrets_loaded
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.secrets.metastore import MetastoreBackend
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.session import provide_session
log = logging.getLogger(__name__)
class Variable(Base, LoggingMixin):
"""
Variables are a generic way to store and retrieve arbitrary content or settings
as a simple key value store within Airflow.
"""
__tablename__ = "variable"
__NO_DEFAULT_SENTINEL = object()
id = Column(Integer, primary_key=True)
key = Column(String(ID_LEN), unique=True)
_val = Column("val", Text().with_variant(MEDIUMTEXT, "mysql"))
description = Column(Text)
is_encrypted = Column(Boolean, unique=False, default=False)
def __init__(self, key=None, val=None, description=None):
super().__init__()
self.key = key
self.val = val
self.description = description
@reconstructor
def on_db_load(self):
if self._val:
mask_secret(self.val, self.key)
def __repr__(self):
# Hide the decrypted value; show the stored (possibly encrypted) _val instead.
return f"{self.key} : {self._val}"
def get_val(self):
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key."""
from cryptography.fernet import InvalidToken as InvalidFernetToken
if self._val is not None and self.is_encrypted:
try:
fernet = get_fernet()
return fernet.decrypt(bytes(self._val, "utf-8")).decode()
except InvalidFernetToken:
self.log.error("Can't decrypt _val for key=%s, invalid token or value", self.key)
return None
except Exception:
self.log.error("Can't decrypt _val for key=%s, FERNET_KEY configuration missing", self.key)
return None
else:
return self._val
def set_val(self, value):
"""Encode the specified value with Fernet Key and store it in Variables Table."""
if value is not None:
fernet = get_fernet()
self._val = fernet.encrypt(bytes(value, "utf-8")).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def val(cls):
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key."""
return synonym("_val", descriptor=property(cls.get_val, cls.set_val))
@classmethod
def setdefault(cls, key, default, description=None, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:param default: Default value to set and return if the variable
isn't already in the DB
:param description: Description to set on the Variable if it is created
:param deserialize_json: Store this as a JSON encoded value in the DB
and decode it when retrieving a value
:return: The Variable's value (the stored default if the key did not exist)
"""
obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, description=description, serialize_json=deserialize_json)
return default
else:
raise ValueError("Default Value must be set")
else:
return obj
@classmethod
def get(
cls,
key: str,
default_var: Any = __NO_DEFAULT_SENTINEL,
deserialize_json: bool = False,
) -> Any:
"""Gets a value for an Airflow Variable Key.
:param key: Variable Key
:param default_var: Default value of the Variable if the Variable doesn't exist
:param deserialize_json: Deserialize the value to a Python dict
"""
var_val = Variable.get_variable_from_secrets(key=key)
if var_val is None:
if default_var is not cls.__NO_DEFAULT_SENTINEL:
return default_var
else:
raise KeyError(f"Variable {key} does not exist")
else:
if deserialize_json:
obj = json.loads(var_val)
mask_secret(obj, key)
return obj
else:
mask_secret(var_val, key)
return var_val
@staticmethod
@provide_session
@internal_api_call
def set(
key: str,
value: Any,
description: str | None = None,
serialize_json: bool = False,
session: Session = None,
) -> None:
"""Sets a value for an Airflow Variable with a given Key.
This operation overwrites an existing variable.
:param key: Variable Key
:param value: Value to set for the Variable
:param description: Description of the Variable
:param serialize_json: Serialize the value to a JSON string
"""
# Check whether the key already exists in a custom secrets backend.
Variable.check_for_write_conflict(key)
if serialize_json:
stored_value = json.dumps(value, indent=2)
else:
stored_value = str(value)
Variable.delete(key, session=session)
session.add(Variable(key=key, val=stored_value, description=description))
session.flush()
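# Illustrative usage sketch (hypothetical keys):
#
#     Variable.set("feature_flags", {"beta": True}, serialize_json=True)
#     flags = Variable.get("feature_flags", deserialize_json=True)
#     threshold = Variable.get("threshold", default_var=10)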
@staticmethod
@provide_session
@internal_api_call
def update(
key: str,
value: Any,
serialize_json: bool = False,
session: Session = None,
) -> None:
"""Updates a given Airflow Variable with the Provided value.
:param key: Variable Key
:param value: Value to set for the Variable
:param serialize_json: Serialize the value to a JSON string
"""
Variable.check_for_write_conflict(key)
if Variable.get_variable_from_secrets(key=key) is None:
raise KeyError(f"Variable {key} does not exist")
obj = session.query(Variable).filter(Variable.key == key).first()
if obj is None:
raise AttributeError(f"Variable {key} does not exist in the Database and cannot be updated.")
Variable.set(key, value, description=obj.description, serialize_json=serialize_json)
@staticmethod
@provide_session
@internal_api_call
def delete(key: str, session: Session = None) -> int:
"""Delete an Airflow Variable for a given key.
:param key: Variable Key
"""
return session.execute(delete(Variable).where(Variable.key == key)).rowcount
def rotate_fernet_key(self):
"""Rotate Fernet Key."""
fernet = get_fernet()
if self._val and self.is_encrypted:
self._val = fernet.rotate(self._val.encode("utf-8")).decode()
@staticmethod
def check_for_write_conflict(key: str) -> None:
"""Logs a warning if a variable exists outside of the metastore.
If we try to write a variable to the metastore while the same key
exists in an environment variable or custom secrets backend, then
subsequent reads will not read the set value.
:param key: Variable Key
"""
for secrets_backend in ensure_secrets_loaded():
if not isinstance(secrets_backend, MetastoreBackend):
try:
var_val = secrets_backend.get_variable(key=key)
if var_val is not None:
log.warning(
"The variable {key} is defined in the {cls} secrets backend, which takes "
"precedence over reading from the database. The value in the database will be "
"updated, but to read it you have to delete the conflicting variable "
"from {cls}".format(key=key, cls=secrets_backend.__class__.__name__)
)
return
except Exception:
log.exception(
"Unable to retrieve variable from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
return None
@staticmethod
def get_variable_from_secrets(key: str) -> str | None:
"""
Get Airflow Variable by iterating over all Secret Backends.
:param key: Variable Key
:return: Variable Value
"""
for secrets_backend in ensure_secrets_loaded():
try:
var_val = secrets_backend.get_variable(key=key)
if var_val is not None:
return var_val
except Exception:
log.exception(
"Unable to retrieve variable from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
return None
| 10,315 | 36.787546 | 107 |
py
|
airflow
|
airflow-main/airflow/models/operator.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Union
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
from airflow.typing_compat import TypeGuard
Operator = Union[BaseOperator, MappedOperator]
def needs_expansion(task: AbstractOperator) -> TypeGuard[Operator]:
"""Whether a task needs expansion at runtime.
A task needs expansion if it either
* Is a mapped operator, or
* Is in a mapped task group.
This is implemented as a free function (instead of a property) so we can
make it a type guard.
"""
if isinstance(task, MappedOperator):
return True
if task.get_closest_mapped_task_group() is not None:
return True
return False
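# Illustrative sketch: because the return type is a TypeGuard, a successful
# check narrows the static type of ``task`` for type checkers:
#
#     if needs_expansion(task):
#         ...  # ``task`` is treated as Operator (mapped or in a mapped group)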
__all__ = ["Operator", "needs_expansion"]
| 1,649 | 32.673469 | 76 |
py
|
airflow
|
airflow-main/airflow/models/tasklog.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import Column, Integer, Text
from airflow.models.base import Base
from airflow.utils import timezone
from airflow.utils.sqlalchemy import UtcDateTime
class LogTemplate(Base):
"""Changes to ``log_filename_template`` and ``elasticsearch_id``.
This table is automatically populated when Airflow starts up, to store the
config's value if it does not match the last row in the table.
"""
__tablename__ = "log_template"
id = Column(Integer, primary_key=True, autoincrement=True)
filename = Column(Text, nullable=False)
elasticsearch_id = Column(Text, nullable=False)
created_at = Column(UtcDateTime, nullable=False, default=timezone.utcnow)
def __repr__(self) -> str:
attrs = ", ".join(f"{k}={getattr(self, k)}" for k in ("filename", "elasticsearch_id"))
return f"LogTemplate({attrs})"
| 1,682 | 37.25 | 94 |
py
|
airflow
|
airflow-main/airflow/models/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Airflow models."""
from __future__ import annotations
# Do not add new models to this -- this is for compat only
__all__ = [
"DAG",
"ID_LEN",
"Base",
"BaseOperator",
"BaseOperatorLink",
"Connection",
"DagBag",
"DagWarning",
"DagModel",
"DagPickle",
"DagRun",
"DagTag",
"DbCallbackRequest",
"ImportError",
"Log",
"MappedOperator",
"Operator",
"Param",
"Pool",
"RenderedTaskInstanceFields",
"SkipMixin",
"SlaMiss",
"TaskFail",
"TaskInstance",
"TaskReschedule",
"Trigger",
"Variable",
"XCom",
"clear_task_instances",
]
from typing import TYPE_CHECKING
def import_all_models():
for name in __lazy_imports:
__getattr__(name)
import airflow.jobs.job
import airflow.models.dagwarning
import airflow.models.dataset
import airflow.models.serialized_dag
import airflow.models.tasklog
import airflow.www.fab_security.sqla.models
def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
path = __lazy_imports.get(name)
if not path:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
from airflow.utils.module_loading import import_string
val = import_string(f"{path}.{name}")
# Store for next time
globals()[name] = val
return val
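# PEP 562 in practice: attribute access such as the line below triggers the
# __getattr__ above, which imports the real module on first use and caches the
# resolved object in globals() for subsequent lookups.
#
#     from airflow.models import Variable   # resolved lazily via __getattr__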
__lazy_imports = {
"DAG": "airflow.models.dag",
"ID_LEN": "airflow.models.base",
"Base": "airflow.models.base",
"BaseOperator": "airflow.models.baseoperator",
"BaseOperatorLink": "airflow.models.baseoperator",
"Connection": "airflow.models.connection",
"DagBag": "airflow.models.dagbag",
"DagModel": "airflow.models.dag",
"DagPickle": "airflow.models.dagpickle",
"DagRun": "airflow.models.dagrun",
"DagTag": "airflow.models.dag",
"DbCallbackRequest": "airflow.models.db_callback_request",
"ImportError": "airflow.models.errors",
"Log": "airflow.models.log",
"MappedOperator": "airflow.models.mappedoperator",
"Operator": "airflow.models.operator",
"Param": "airflow.models.param",
"Pool": "airflow.models.pool",
"RenderedTaskInstanceFields": "airflow.models.renderedtifields",
"SkipMixin": "airflow.models.skipmixin",
"SlaMiss": "airflow.models.slamiss",
"TaskFail": "airflow.models.taskfail",
"TaskInstance": "airflow.models.taskinstance",
"TaskReschedule": "airflow.models.taskreschedule",
"Trigger": "airflow.models.trigger",
"Variable": "airflow.models.variable",
"XCom": "airflow.models.xcom",
"clear_task_instances": "airflow.models.taskinstance",
}
if TYPE_CHECKING:
# I was unable to get mypy to respect an airflow/models/__init__.pyi, so
# we have to resort to this hacky method
from airflow.models.base import ID_LEN, Base
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.connection import Connection
from airflow.models.dag import DAG, DagModel, DagTag
from airflow.models.dagbag import DagBag
from airflow.models.dagpickle import DagPickle
from airflow.models.dagrun import DagRun
from airflow.models.db_callback_request import DbCallbackRequest
from airflow.models.errors import ImportError
from airflow.models.log import Log
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.param import Param
from airflow.models.pool import Pool
from airflow.models.renderedtifields import RenderedTaskInstanceFields
from airflow.models.skipmixin import SkipMixin
from airflow.models.slamiss import SlaMiss
from airflow.models.taskfail import TaskFail
from airflow.models.taskinstance import TaskInstance, clear_task_instances
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.trigger import Trigger
from airflow.models.variable import Variable
from airflow.models.xcom import XCom
| 4,789 | 32.971631 | 78 |
py
|
airflow
|
airflow-main/airflow/models/crypto.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.typing_compat import Protocol
log = logging.getLogger(__name__)
class FernetProtocol(Protocol):
"""This class is only used for TypeChecking (for IDEs, mypy, etc)."""
def decrypt(self, b):
"""Decrypt with Fernet."""
def encrypt(self, b):
"""Encrypt with Fernet."""
class NullFernet:
"""
A "Null" encryptor class that doesn't encrypt or decrypt but that presents
a similar interface to Fernet.
The purpose of this is to make the rest of the code not have to know the
difference, and to only display the message once, not 20 times when
`airflow db init` is run.
"""
is_encrypted = False
def decrypt(self, b):
"""Decrypt with Fernet."""
return b
def encrypt(self, b):
"""Encrypt with Fernet."""
return b
_fernet: FernetProtocol | None = None
def get_fernet():
"""
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
"""
from cryptography.fernet import Fernet, MultiFernet
global _fernet
if _fernet:
return _fernet
try:
fernet_key = conf.get("core", "FERNET_KEY")
if not fernet_key:
log.warning("empty cryptography key - values will not be stored encrypted.")
_fernet = NullFernet()
else:
_fernet = MultiFernet(
[Fernet(fernet_part.encode("utf-8")) for fernet_part in fernet_key.split(",")]
)
_fernet.is_encrypted = True
except (ValueError, TypeError) as value_error:
raise AirflowException(f"Could not create Fernet object: {value_error}")
return _fernet
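# A valid Fernet key can be generated with the cryptography package and set via
# ``[core] fernet_key`` (or the AIRFLOW__CORE__FERNET_KEY environment variable);
# a comma-separated list of keys enables rotation through MultiFernet. Sketch:
#
#     from cryptography.fernet import Fernet
#     print(Fernet.generate_key().decode())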
| 2,775 | 28.531915 | 94 |
py
|
airflow
|
airflow-main/airflow/models/serialized_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG table in database."""
from __future__ import annotations
import logging
import zlib
from datetime import datetime, timedelta
from typing import Collection
import sqlalchemy_jsonfield
from sqlalchemy import BigInteger, Column, Index, LargeBinary, String, and_, or_
from sqlalchemy.orm import Session, backref, foreign, relationship
from sqlalchemy.sql.expression import func, literal
from airflow.models.base import ID_LEN, Base
from airflow.models.dag import DAG, DagModel
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun
from airflow.serialization.serialized_objects import DagDependency, SerializedDAG
from airflow.settings import COMPRESS_SERIALIZED_DAGS, MIN_SERIALIZED_DAG_UPDATE_INTERVAL, json
from airflow.utils import timezone
from airflow.utils.hashlib_wrapper import md5
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
log = logging.getLogger(__name__)
class SerializedDagModel(Base):
"""A table for serialized DAGs.
The serialized_dag table is a snapshot of DAG files synchronized by the scheduler.
This feature is controlled by:
* ``[core] min_serialized_dag_update_interval = 30`` (s):
serialized DAGs are updated in the DB when a file gets processed by the scheduler;
to reduce the DB write rate, there is a minimal interval between updates of a serialized DAG.
* ``[scheduler] dag_dir_list_interval = 300`` (s):
interval at which serialized DAGs are deleted from the DB when their files are deleted;
a smaller interval such as 60 is suggested.
* ``[core] compress_serialized_dags``:
whether to compress the DAG data written to the database.
The table is used by the webserver to load DAGs, because reading from the database
is lightweight compared to importing from files, which solves the webserver
scalability issue.
"""
__tablename__ = "serialized_dag"
dag_id = Column(String(ID_LEN), primary_key=True)
fileloc = Column(String(2000), nullable=False)
# The max length of fileloc exceeds the limit of indexing.
fileloc_hash = Column(BigInteger(), nullable=False)
_data = Column("data", sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
_data_compressed = Column("data_compressed", LargeBinary, nullable=True)
last_updated = Column(UtcDateTime, nullable=False)
dag_hash = Column(String(32), nullable=False)
processor_subdir = Column(String(2000), nullable=True)
__table_args__ = (Index("idx_fileloc_hash", fileloc_hash, unique=False),)
dag_runs = relationship(
DagRun,
primaryjoin=dag_id == foreign(DagRun.dag_id), # type: ignore
backref=backref("serialized_dag", uselist=False, innerjoin=True),
)
dag_model = relationship(
DagModel,
primaryjoin=dag_id == DagModel.dag_id, # type: ignore
foreign_keys=dag_id,
uselist=False,
innerjoin=True,
backref=backref("serialized_dag", uselist=False, innerjoin=True),
)
load_op_links = True
def __init__(self, dag: DAG, processor_subdir: str | None = None) -> None:
self.dag_id = dag.dag_id
self.fileloc = dag.fileloc
self.fileloc_hash = DagCode.dag_fileloc_hash(self.fileloc)
self.last_updated = timezone.utcnow()
self.processor_subdir = processor_subdir
dag_data = SerializedDAG.to_dict(dag)
dag_data_json = json.dumps(dag_data, sort_keys=True).encode("utf-8")
self.dag_hash = md5(dag_data_json).hexdigest()
if COMPRESS_SERIALIZED_DAGS:
self._data = None
self._data_compressed = zlib.compress(dag_data_json)
else:
self._data = dag_data
self._data_compressed = None
# serve as cache so no need to decompress and load, when accessing data field
# when COMPRESS_SERIALIZED_DAGS is True
self.__data_cache = dag_data
def __repr__(self) -> str:
return f"<SerializedDag: {self.dag_id}>"
@classmethod
@provide_session
def write_dag(
cls,
dag: DAG,
min_update_interval: int | None = None,
processor_subdir: str | None = None,
session: Session = NEW_SESSION,
) -> bool:
"""Serializes a DAG and writes it into database.
If the record already exists, it checks whether the serialized DAG has changed.
If it has, the record is updated; otherwise the write is skipped.
:param dag: a DAG to be written into database
:param min_update_interval: minimal interval in seconds to update serialized DAG
:param session: ORM Session
:returns: Boolean indicating if the DAG was written to the DB
"""
# Checks if (Current Time - Time when the DAG was written to DB) < min_update_interval
# If Yes, does nothing
# If No, or the DAG does not exist, updates / writes the Serialized DAG to the DB
if min_update_interval is not None:
if (
session.query(literal(True))
.filter(
and_(
cls.dag_id == dag.dag_id,
(timezone.utcnow() - timedelta(seconds=min_update_interval)) < cls.last_updated,
)
)
.scalar()
):
return False
log.debug("Checking if DAG (%s) changed", dag.dag_id)
new_serialized_dag = cls(dag, processor_subdir)
serialized_dag_db = (
session.query(cls.dag_hash, cls.processor_subdir).filter(cls.dag_id == dag.dag_id).first()
)
if (
serialized_dag_db is not None
and serialized_dag_db.dag_hash == new_serialized_dag.dag_hash
and serialized_dag_db.processor_subdir == new_serialized_dag.processor_subdir
):
log.debug("Serialized DAG (%s) is unchanged. Skipping writing to DB", dag.dag_id)
return False
log.debug("Writing Serialized DAG: %s to the DB", dag.dag_id)
session.merge(new_serialized_dag)
log.debug("DAG: %s written to the DB", dag.dag_id)
return True
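# Illustrative usage sketch (assuming ``dag`` is an already-parsed DAG object):
#
#     written = SerializedDagModel.write_dag(dag, min_update_interval=30)
#     # ``written`` is False when the serialized form is unchanged or the
#     # minimum update interval has not yet elapsed.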
@classmethod
@provide_session
def read_all_dags(cls, session: Session = NEW_SESSION) -> dict[str, SerializedDAG]:
"""Reads all DAGs in serialized_dag table.
:param session: ORM Session
:returns: a dict of DAGs read from database
"""
serialized_dags = session.query(cls)
dags = {}
for row in serialized_dags:
log.debug("Deserializing DAG: %s", row.dag_id)
dag = row.dag
# Coherence check
if dag.dag_id == row.dag_id:
dags[row.dag_id] = dag
else:
log.warning(
"dag_id Mismatch in DB: Row with dag_id '%s' has Serialised DAG with '%s' dag_id",
row.dag_id,
dag.dag_id,
)
return dags
@property
def data(self) -> dict | None:
# use __data_cache to avoid decompress and loads
if not hasattr(self, "__data_cache") or self.__data_cache is None:
if self._data_compressed:
self.__data_cache = json.loads(zlib.decompress(self._data_compressed))
else:
self.__data_cache = self._data
return self.__data_cache
@property
def dag(self) -> SerializedDAG:
"""The DAG deserialized from the ``data`` column."""
SerializedDAG._load_operator_extra_links = self.load_op_links
if isinstance(self.data, dict):
data = self.data
elif isinstance(self.data, str):
data = json.loads(self.data)
else:
raise ValueError("invalid or missing serialized DAG data")
return SerializedDAG.from_dict(data)
@classmethod
@provide_session
def remove_dag(cls, dag_id: str, session: Session = NEW_SESSION) -> None:
"""Deletes a DAG with given dag_id.
:param dag_id: dag_id to be deleted
:param session: ORM Session.
"""
session.execute(cls.__table__.delete().where(cls.dag_id == dag_id))
@classmethod
@provide_session
def remove_deleted_dags(
cls,
alive_dag_filelocs: Collection[str],
processor_subdir: str | None = None,
session: Session = NEW_SESSION,
) -> None:
"""Deletes DAGs not included in alive_dag_filelocs.
:param alive_dag_filelocs: file paths of alive DAGs
:param session: ORM Session
"""
alive_fileloc_hashes = [DagCode.dag_fileloc_hash(fileloc) for fileloc in alive_dag_filelocs]
log.debug(
"Deleting Serialized DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__
)
session.execute(
cls.__table__.delete().where(
and_(
cls.fileloc_hash.notin_(alive_fileloc_hashes),
cls.fileloc.notin_(alive_dag_filelocs),
or_(
cls.processor_subdir.is_(None),
cls.processor_subdir == processor_subdir,
),
)
)
)
@classmethod
@provide_session
def has_dag(cls, dag_id: str, session: Session = NEW_SESSION) -> bool:
"""Checks a DAG exist in serialized_dag table.
:param dag_id: the DAG to check
:param session: ORM Session
"""
return session.query(literal(True)).filter(cls.dag_id == dag_id).first() is not None
@classmethod
@provide_session
def get_dag(cls, dag_id: str, session: Session = NEW_SESSION) -> SerializedDAG | None:
row = cls.get(dag_id, session=session)
if row:
return row.dag
return None
@classmethod
@provide_session
def get(cls, dag_id: str, session: Session = NEW_SESSION) -> SerializedDagModel | None:
"""
Get the SerializedDAG for the given dag ID.
It will cope with being passed the ID of a subdag by looking up the
root dag_id from the DAG table.
:param dag_id: the DAG to fetch
:param session: ORM Session
"""
row = session.query(cls).filter(cls.dag_id == dag_id).one_or_none()
if row:
return row
# If we didn't find a matching DAG id then ask the DAG table to find
# out the root dag
root_dag_id = session.query(DagModel.root_dag_id).filter(DagModel.dag_id == dag_id).scalar()
return session.query(cls).filter(cls.dag_id == root_dag_id).one_or_none()
@staticmethod
@provide_session
def bulk_sync_to_db(
dags: list[DAG],
processor_subdir: str | None = None,
session: Session = NEW_SESSION,
) -> None:
"""
Saves DAGs as Serialized DAG objects in the database. Each
DAG is saved in a separate database query.
:param dags: the DAG objects to save to the DB
:param session: ORM Session
:return: None
"""
for dag in dags:
if not dag.is_subdag:
SerializedDagModel.write_dag(
dag=dag,
min_update_interval=MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
processor_subdir=processor_subdir,
session=session,
)
@classmethod
@provide_session
def get_last_updated_datetime(cls, dag_id: str, session: Session = NEW_SESSION) -> datetime | None:
"""
Get the date when the serialized DAG associated with the given DAG was last
updated in the serialized_dag table.
:param dag_id: DAG ID
:param session: ORM Session
"""
return session.query(cls.last_updated).filter(cls.dag_id == dag_id).scalar()
@classmethod
@provide_session
def get_max_last_updated_datetime(cls, session: Session = NEW_SESSION) -> datetime | None:
"""
Get the maximum date when any DAG was last updated in serialized_dag table.
:param session: ORM Session
"""
return session.query(func.max(cls.last_updated)).scalar()
@classmethod
@provide_session
def get_latest_version_hash(cls, dag_id: str, session: Session = NEW_SESSION) -> str | None:
"""
Get the latest DAG version for a given DAG ID.
:param dag_id: DAG ID
:param session: ORM Session
:return: DAG Hash, or None if the DAG is not found
"""
return session.query(cls.dag_hash).filter(cls.dag_id == dag_id).scalar()
@classmethod
def get_latest_version_hash_and_updated_datetime(
cls,
dag_id: str,
*,
session: Session,
) -> tuple[str, datetime] | None:
"""
Get the latest DAG version for a given DAG ID, as well as the date when the serialized DAG
associated with the DAG was last updated in the serialized_dag table.
:meta private:
:param dag_id: DAG ID
:param session: ORM Session
:return: A tuple of DAG Hash and last updated datetime, or None if the DAG is not found
"""
return session.query(cls.dag_hash, cls.last_updated).filter(cls.dag_id == dag_id).one_or_none()
@classmethod
@provide_session
def get_dag_dependencies(cls, session: Session = NEW_SESSION) -> dict[str, list[DagDependency]]:
"""
Get the dependencies between DAGs.
:param session: ORM Session
"""
if session.bind.dialect.name in ["sqlite", "mysql"]:
query = session.query(cls.dag_id, func.json_extract(cls._data, "$.dag.dag_dependencies"))
iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
elif session.bind.dialect.name == "mssql":
query = session.query(cls.dag_id, func.json_query(cls._data, "$.dag.dag_dependencies"))
iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
else:
iterator = session.query(cls.dag_id, func.json_extract_path(cls._data, "dag", "dag_dependencies"))
return {dag_id: [DagDependency(**d) for d in (deps_data or [])] for dag_id, deps_data in iterator}
| 15,036 | 36.781407 | 110 |
py
|
airflow
|
airflow-main/airflow/models/pool.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from sqlalchemy import Column, Integer, String, Text, func, select
from sqlalchemy.orm.session import Session
from airflow.exceptions import AirflowException, PoolNotFound
from airflow.models.base import Base
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.typing_compat import TypedDict
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import nowait, with_row_locks
from airflow.utils.state import TaskInstanceState
class PoolStats(TypedDict):
"""Dictionary containing Pool Stats."""
total: int
running: int
queued: int
open: int
class Pool(Base):
"""the class to get Pool info."""
__tablename__ = "slot_pool"
id = Column(Integer, primary_key=True)
pool = Column(String(256), unique=True)
# -1 for infinite
slots = Column(Integer, default=0)
description = Column(Text)
DEFAULT_POOL_NAME = "default_pool"
def __repr__(self):
return str(self.pool)
@staticmethod
@provide_session
def get_pools(session: Session = NEW_SESSION) -> list[Pool]:
"""Get all pools."""
return session.scalars(select(Pool)).all()
@staticmethod
@provide_session
def get_pool(pool_name: str, session: Session = NEW_SESSION) -> Pool | None:
"""
Get the Pool with specific pool name from the Pools.
:param pool_name: The pool name of the Pool to get.
:param session: SQLAlchemy ORM Session
:return: the pool object
"""
return session.scalar(select(Pool).where(Pool.pool == pool_name))
@staticmethod
@provide_session
def get_default_pool(session: Session = NEW_SESSION) -> Pool | None:
"""
Get the Pool of the default_pool from the Pools.
:param session: SQLAlchemy ORM Session
:return: the pool object
"""
return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)
@staticmethod
@provide_session
def is_default_pool(id: int, session: Session = NEW_SESSION) -> bool:
"""
Check if the given id is that of the default_pool.
:param id: pool id
:param session: SQLAlchemy ORM Session
:return: True if id is default_pool, otherwise False
"""
return (
session.scalar(
select(func.count(Pool.id)).where(Pool.id == id, Pool.pool == Pool.DEFAULT_POOL_NAME)
)
> 0
)
@staticmethod
@provide_session
def create_or_update_pool(
name: str,
slots: int,
description: str,
session: Session = NEW_SESSION,
) -> Pool:
"""Create a pool with given parameters or update it if it already exists."""
if not name:
raise ValueError("Pool name must not be empty")
pool = session.scalar(select(Pool).filter_by(pool=name))
if pool is None:
pool = Pool(pool=name, slots=slots, description=description)
session.add(pool)
else:
pool.slots = slots
pool.description = description
session.commit()
return pool
@staticmethod
@provide_session
def delete_pool(name: str, session: Session = NEW_SESSION) -> Pool:
"""Delete pool by a given name."""
if name == Pool.DEFAULT_POOL_NAME:
raise AirflowException(f"{Pool.DEFAULT_POOL_NAME} cannot be deleted")
pool = session.scalar(select(Pool).filter_by(pool=name))
if pool is None:
raise PoolNotFound(f"Pool '{name}' doesn't exist")
session.delete(pool)
session.commit()
return pool
@staticmethod
@provide_session
def slots_stats(
*,
lock_rows: bool = False,
session: Session = NEW_SESSION,
) -> dict[str, PoolStats]:
"""
        Get pool stats (number of running, queued, open, and total slots per pool).
If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a
non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an
OperationalError.
        :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returned
:param session: SQLAlchemy ORM Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
pools: dict[str, PoolStats] = {}
query = select(Pool.pool, Pool.slots)
if lock_rows:
query = with_row_locks(query, session=session, **nowait(session))
pool_rows = session.execute(query)
for (pool_name, total_slots) in pool_rows:
if total_slots == -1:
total_slots = float("inf") # type: ignore
pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
state_count_by_pool = session.execute(
select(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.state.in_(EXECUTION_STATES))
.group_by(TaskInstance.pool, TaskInstance.state)
)
# calculate queued and running metrics
for pool_name, state, count in state_count_by_pool:
# Some databases return decimal.Decimal here.
count = int(count)
stats_dict: PoolStats | None = pools.get(pool_name)
if not stats_dict:
continue
# TypedDict key must be a string literal, so we use if-statements to set value
if state == TaskInstanceState.RUNNING:
stats_dict["running"] = count
elif state == TaskInstanceState.QUEUED:
stats_dict["queued"] = count
else:
raise AirflowException(f"Unexpected state. Expected values: {EXECUTION_STATES}.")
# calculate open metric
for pool_name, stats_dict in pools.items():
stats_dict["open"] = stats_dict["total"] - stats_dict["running"] - stats_dict["queued"]
return pools
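    # Illustrative usage sketch (editor addition, not part of Airflow). Assumes a
    # configured metadata database; "etl_pool" is a hypothetical pool name:
    #
    #     from airflow.models.pool import Pool
    #
    #     stats = Pool.slots_stats()
    #     etl = stats.get("etl_pool")
    #     if etl is not None:
    #         print(etl["total"], etl["running"], etl["queued"], etl["open"])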
def to_json(self) -> dict[str, Any]:
"""
Get the Pool in a json structure.
:return: the pool object in json format
"""
return {
"id": self.id,
"pool": self.pool,
"slots": self.slots,
"description": self.description,
}
@provide_session
def occupied_slots(self, session: Session = NEW_SESSION) -> int:
"""
Get the number of slots used by running/queued tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
return int(
session.scalar(
select(func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state.in_(EXECUTION_STATES))
)
or 0
)
@provide_session
def running_slots(self, session: Session = NEW_SESSION) -> int:
"""
Get the number of slots used by running tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
return int(
session.scalar(
select(func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == TaskInstanceState.RUNNING)
)
or 0
)
@provide_session
def queued_slots(self, session: Session = NEW_SESSION) -> int:
"""
Get the number of slots used by queued tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
return int(
session.scalar(
select(func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == TaskInstanceState.QUEUED)
)
or 0
)
@provide_session
def scheduled_slots(self, session: Session = NEW_SESSION) -> int:
"""
Get the number of slots scheduled at the moment.
:param session: SQLAlchemy ORM Session
:return: the number of scheduled slots
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
return int(
session.scalar(
select(func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == TaskInstanceState.SCHEDULED)
)
or 0
)
@provide_session
def open_slots(self, session: Session = NEW_SESSION) -> float:
"""
Get the number of slots open at the moment.
:param session: SQLAlchemy ORM Session
:return: the number of slots
"""
if self.slots == -1:
return float("inf")
return self.slots - self.occupied_slots(session)
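    # Illustrative usage sketch (editor addition, not part of Airflow). "etl_pool"
    # is a hypothetical pool name; passing ``slots=-1`` would make the pool
    # unbounded, so ``open_slots()`` returns infinity:
    #
    #     from airflow.models.pool import Pool
    #
    #     pool = Pool.create_or_update_pool(name="etl_pool", slots=8, description="ETL tasks")
    #     print(pool.open_slots())    # 8 minus slots currently occupied by running/queued tasks
    #     Pool.delete_pool(name="etl_pool")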
airflow-main/airflow/models/dagwarning.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
from sqlalchemy import Column, ForeignKeyConstraint, String, Text, delete, false, select
from sqlalchemy.orm import Session
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.models.base import Base, StringID
from airflow.utils import timezone
from airflow.utils.retries import retry_db_transaction
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
class DagWarning(Base):
"""
A table to store DAG warnings.
DAG warnings are problems that don't rise to the level of failing the DAG parse
    but which users should nonetheless be warned about. These warnings are recorded
    when parsing a DAG and displayed on the webserver in a flash message.
"""
dag_id = Column(StringID(), primary_key=True)
warning_type = Column(String(50), primary_key=True)
message = Column(Text, nullable=False)
timestamp = Column(UtcDateTime, nullable=False, default=timezone.utcnow)
__tablename__ = "dag_warning"
__table_args__ = (
ForeignKeyConstraint(
("dag_id",),
["dag.dag_id"],
name="dcw_dag_id_fkey",
ondelete="CASCADE",
),
)
def __init__(self, dag_id: str, error_type: str, message: str, **kwargs):
super().__init__(**kwargs)
self.dag_id = dag_id
self.warning_type = DagWarningType(error_type).value # make sure valid type
self.message = message
def __eq__(self, other) -> bool:
return self.dag_id == other.dag_id and self.warning_type == other.warning_type
def __hash__(self) -> int:
return hash((self.dag_id, self.warning_type))
@classmethod
@internal_api_call
@provide_session
def purge_inactive_dag_warnings(cls, session: Session = NEW_SESSION) -> None:
"""
        Delete DagWarning records for inactive DAGs.
:return: None
"""
cls._purge_inactive_dag_warnings_with_retry(session)
@classmethod
@retry_db_transaction
def _purge_inactive_dag_warnings_with_retry(cls, session: Session) -> None:
from airflow.models.dag import DagModel
if session.get_bind().dialect.name == "sqlite":
dag_ids_stmt = select(DagModel.dag_id).where(DagModel.is_active == false())
query = delete(cls).where(cls.dag_id.in_(dag_ids_stmt.scalar_subquery()))
else:
query = delete(cls).where(cls.dag_id == DagModel.dag_id, DagModel.is_active == false())
session.execute(query.execution_options(synchronize_session=False))
session.commit()
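    # Illustrative sketch (editor addition, not part of Airflow): DAG processors
    # typically record a warning while parsing, e.g. when a task references a pool
    # that does not exist. "my_dag" is a hypothetical dag_id and ``session`` an
    # already-open ORM session:
    #
    #     warning = DagWarning("my_dag", "non-existent pool", "Task uses pool 'missing_pool'")
    #     session.merge(warning)
    #     session.commit()
    #     DagWarning.purge_inactive_dag_warnings()   # later drops rows for inactive DAGs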
class DagWarningType(str, Enum):
"""
Enum for DAG warning types.
This is the set of allowable values for the ``warning_type`` field
in the DagWarning model.
"""
NONEXISTENT_POOL = "non-existent pool"
airflow-main/airflow/models/taskreschedule.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TaskReschedule tracks rescheduled task instances."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING
from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc, desc, event, text
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import Query, Session, relationship
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
if TYPE_CHECKING:
from airflow.models.operator import Operator
from airflow.models.taskinstance import TaskInstance
class TaskReschedule(Base):
"""TaskReschedule tracks rescheduled task instances."""
__tablename__ = "task_reschedule"
id = Column(Integer, primary_key=True)
task_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
run_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
map_index = Column(Integer, nullable=False, server_default=text("-1"))
try_number = Column(Integer, nullable=False)
start_date = Column(UtcDateTime, nullable=False)
end_date = Column(UtcDateTime, nullable=False)
duration = Column(Integer, nullable=False)
reschedule_date = Column(UtcDateTime, nullable=False)
__table_args__ = (
Index("idx_task_reschedule_dag_task_run", dag_id, task_id, run_id, map_index, unique=False),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_reschedule_ti_fkey",
ondelete="CASCADE",
),
Index("idx_task_reschedule_dag_run", dag_id, run_id),
ForeignKeyConstraint(
[dag_id, run_id],
["dag_run.dag_id", "dag_run.run_id"],
name="task_reschedule_dr_fkey",
ondelete="CASCADE",
),
)
dag_run = relationship("DagRun")
execution_date = association_proxy("dag_run", "execution_date")
def __init__(
self,
task: Operator,
run_id: str,
try_number: int,
start_date: datetime.datetime,
end_date: datetime.datetime,
reschedule_date: datetime.datetime,
map_index: int = -1,
) -> None:
self.dag_id = task.dag_id
self.task_id = task.task_id
self.run_id = run_id
self.map_index = map_index
self.try_number = try_number
self.start_date = start_date
self.end_date = end_date
self.reschedule_date = reschedule_date
self.duration = (self.end_date - self.start_date).total_seconds()
@staticmethod
@provide_session
def query_for_task_instance(
task_instance: TaskInstance,
descending: bool = False,
session: Session = NEW_SESSION,
try_number: int | None = None,
) -> Query:
"""
        Returns the query for task reschedules for the given task instance.
:param session: the database session object
:param task_instance: the task instance to find task reschedules for
:param descending: If True then records are returned in descending order
:param try_number: Look for TaskReschedule of the given try_number. Default is None which
looks for the same try_number of the given task_instance.
"""
if try_number is None:
try_number = task_instance.try_number
TR = TaskReschedule
qry = session.query(TR).filter(
TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.run_id == task_instance.run_id,
TR.map_index == task_instance.map_index,
TR.try_number == try_number,
)
if descending:
return qry.order_by(desc(TR.id))
else:
return qry.order_by(asc(TR.id))
@staticmethod
@provide_session
def find_for_task_instance(
task_instance: TaskInstance,
session: Session = NEW_SESSION,
try_number: int | None = None,
) -> list[TaskReschedule]:
"""
Returns all task reschedules for the task instance and try number,
in ascending order.
:param session: the database session object
:param task_instance: the task instance to find task reschedules for
:param try_number: Look for TaskReschedule of the given try_number. Default is None which
looks for the same try_number of the given task_instance.
"""
return TaskReschedule.query_for_task_instance(
task_instance, session=session, try_number=try_number
).all()
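    # Illustrative sketch (editor addition, not part of Airflow): reschedule-mode
    # sensors record one row per reschedule, so the history for a task instance
    # ``ti`` (assumed to be available) can be inspected like this:
    #
    #     for tr in TaskReschedule.find_for_task_instance(ti):
    #         print(tr.try_number, tr.reschedule_date, tr.duration)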
@event.listens_for(TaskReschedule.__table__, "before_create")
def add_ondelete_for_mssql(table, conn, **kw):
if conn.dialect.name != "mssql":
return
for constraint in table.constraints:
if constraint.name != "task_reschedule_dr_fkey":
continue
constraint.ondelete = "NO ACTION"
return
airflow-main/airflow/models/dagrun.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import itertools
import os
import warnings
from collections import defaultdict
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, NamedTuple, Sequence, TypeVar, overload
import re2
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
ForeignKeyConstraint,
Index,
Integer,
PickleType,
PrimaryKeyConstraint,
String,
Text,
UniqueConstraint,
and_,
func,
or_,
text,
update,
)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import Query, Session, declared_attr, joinedload, relationship, synonym, validates
from sqlalchemy.sql.expression import false, select, true
from airflow import settings
from airflow.callbacks.callback_requests import DagCallbackRequest
from airflow.configuration import conf as airflow_conf
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning, TaskNotFound
from airflow.listeners.listener import get_listener_manager
from airflow.models.abstractoperator import NotMapped
from airflow.models.base import Base, StringID
from airflow.models.expandinput import NotFullyPopulated
from airflow.models.taskinstance import TaskInstance as TI
from airflow.models.tasklog import LogTemplate
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_states import SCHEDULEABLE_STATES
from airflow.typing_compat import Literal
from airflow.utils import timezone
from airflow.utils.helpers import chunks, is_container, prune_dict
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import UtcDateTime, nulls_first, skip_locked, tuple_in_condition, with_row_locks
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.types import NOTSET, ArgNotSet, DagRunType
if TYPE_CHECKING:
from airflow.models.dag import DAG
from airflow.models.operator import Operator
CreatedTasks = TypeVar("CreatedTasks", Iterator["dict[str, Any]"], Iterator[TI])
TaskCreator = Callable[[Operator, Iterable[int]], CreatedTasks]
RUN_ID_REGEX = r"^(?:manual|scheduled|dataset_triggered)__(?:\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00)$"
class TISchedulingDecision(NamedTuple):
"""Type of return for DagRun.task_instance_scheduling_decisions."""
tis: list[TI]
schedulable_tis: list[TI]
changed_tis: bool
unfinished_tis: list[TI]
finished_tis: list[TI]
def _creator_note(val):
"""Custom creator for the ``note`` association proxy."""
if isinstance(val, str):
return DagRunNote(content=val)
elif isinstance(val, dict):
return DagRunNote(**val)
else:
return DagRunNote(*val)
class DagRun(Base, LoggingMixin):
"""Invocation instance of a DAG.
A DAG run can be created by the scheduler (i.e. scheduled runs), or by an
external trigger (i.e. manual runs).
"""
__tablename__ = "dag_run"
id = Column(Integer, primary_key=True)
dag_id = Column(StringID(), nullable=False)
queued_at = Column(UtcDateTime)
execution_date = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
_state = Column("state", String(50), default=DagRunState.QUEUED)
run_id = Column(StringID(), nullable=False)
creating_job_id = Column(Integer)
external_trigger = Column(Boolean, default=True)
run_type = Column(String(50), nullable=False)
conf = Column(PickleType)
# These two must be either both NULL or both datetime.
data_interval_start = Column(UtcDateTime)
data_interval_end = Column(UtcDateTime)
# When a scheduler last attempted to schedule TIs for this DagRun
last_scheduling_decision = Column(UtcDateTime)
dag_hash = Column(String(32))
# Foreign key to LogTemplate. DagRun rows created prior to this column's
# existence have this set to NULL. Later rows automatically populate this on
# insert to point to the latest LogTemplate entry.
log_template_id = Column(
Integer,
ForeignKey("log_template.id", name="task_instance_log_template_id_fkey", ondelete="NO ACTION"),
default=select(func.max(LogTemplate.__table__.c.id)),
)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow)
# Remove this `if` after upgrading Sphinx-AutoAPI
if not TYPE_CHECKING and "BUILDING_AIRFLOW_DOCS" in os.environ:
dag: DAG | None
else:
dag: DAG | None = None
__table_args__ = (
Index("dag_id_state", dag_id, _state),
UniqueConstraint("dag_id", "execution_date", name="dag_run_dag_id_execution_date_key"),
UniqueConstraint("dag_id", "run_id", name="dag_run_dag_id_run_id_key"),
Index("idx_last_scheduling_decision", last_scheduling_decision),
Index("idx_dag_run_dag_id", dag_id),
Index(
"idx_dag_run_running_dags",
"state",
"dag_id",
postgresql_where=text("state='running'"),
mssql_where=text("state='running'"),
sqlite_where=text("state='running'"),
),
# since mysql lacks filtered/partial indices, this creates a
# duplicate index on mysql. Not the end of the world
Index(
"idx_dag_run_queued_dags",
"state",
"dag_id",
postgresql_where=text("state='queued'"),
mssql_where=text("state='queued'"),
sqlite_where=text("state='queued'"),
),
)
task_instances = relationship(
TI, back_populates="dag_run", cascade="save-update, merge, delete, delete-orphan"
)
dag_model = relationship(
"DagModel",
primaryjoin="foreign(DagRun.dag_id) == DagModel.dag_id",
uselist=False,
viewonly=True,
)
dag_run_note = relationship(
"DagRunNote",
back_populates="dag_run",
uselist=False,
cascade="all, delete, delete-orphan",
)
note = association_proxy("dag_run_note", "content", creator=_creator_note)
DEFAULT_DAGRUNS_TO_EXAMINE = airflow_conf.getint(
"scheduler",
"max_dagruns_per_loop_to_schedule",
fallback=20,
)
def __init__(
self,
dag_id: str | None = None,
run_id: str | None = None,
queued_at: datetime | None | ArgNotSet = NOTSET,
execution_date: datetime | None = None,
start_date: datetime | None = None,
external_trigger: bool | None = None,
conf: Any | None = None,
state: DagRunState | None = None,
run_type: str | None = None,
dag_hash: str | None = None,
creating_job_id: int | None = None,
data_interval: tuple[datetime, datetime] | None = None,
):
if data_interval is None:
            # Legacy: Only happens for runs created prior to Airflow 2.2.
self.data_interval_start = self.data_interval_end = None
else:
self.data_interval_start, self.data_interval_end = data_interval
self.dag_id = dag_id
self.run_id = run_id
self.execution_date = execution_date
self.start_date = start_date
self.external_trigger = external_trigger
self.conf = conf or {}
if state is not None:
self.state = state
if queued_at is NOTSET:
self.queued_at = timezone.utcnow() if state == DagRunState.QUEUED else None
else:
self.queued_at = queued_at
self.run_type = run_type
self.dag_hash = dag_hash
self.creating_job_id = creating_job_id
super().__init__()
def __repr__(self):
return (
"<DagRun {dag_id} @ {execution_date}: {run_id}, state:{state}, "
"queued_at: {queued_at}. externally triggered: {external_trigger}>"
).format(
dag_id=self.dag_id,
execution_date=self.execution_date,
run_id=self.run_id,
state=self.state,
queued_at=self.queued_at,
external_trigger=self.external_trigger,
)
@validates("run_id")
def validate_run_id(self, key: str, run_id: str) -> str | None:
if not run_id:
return None
regex = airflow_conf.get("scheduler", "allowed_run_id_pattern")
if not re2.match(regex, run_id) and not re2.match(RUN_ID_REGEX, run_id):
raise ValueError(
f"The run_id provided '{run_id}' does not match the pattern '{regex}' or '{RUN_ID_REGEX}'"
)
return run_id
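    # Illustrative examples (editor addition, not part of Airflow): a run_id passes
    # validation if it matches either the configured ``allowed_run_id_pattern`` or
    # the built-in ``RUN_ID_REGEX`` above, e.g.:
    #
    #     "scheduled__2023-01-01T00:00:00+00:00"   # matches RUN_ID_REGEX
    #     "manual__2023-01-01T00:00:00+00:00"      # matches RUN_ID_REGEX
    #     "my-custom-run"                          # must match allowed_run_id_pattern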
@property
def stats_tags(self) -> dict[str, str]:
return prune_dict({"dag_id": self.dag_id, "run_type": self.run_type})
@property
def logical_date(self) -> datetime:
return self.execution_date
def get_state(self):
return self._state
def set_state(self, state: DagRunState) -> None:
if state not in State.dag_states:
raise ValueError(f"invalid DagRun state: {state}")
if self._state != state:
self._state = state
self.end_date = timezone.utcnow() if self._state in State.finished_dr_states else None
if state == DagRunState.QUEUED:
self.queued_at = timezone.utcnow()
@declared_attr
def state(self):
return synonym("_state", descriptor=property(self.get_state, self.set_state))
@provide_session
def refresh_from_db(self, session: Session = NEW_SESSION) -> None:
"""
Reloads the current dagrun from the database.
:param session: database session
"""
dr = session.scalars(
select(DagRun).where(DagRun.dag_id == self.dag_id, DagRun.run_id == self.run_id)
).one()
self.id = dr.id
self.state = dr.state
@classmethod
@provide_session
def active_runs_of_dags(
cls,
dag_ids: Iterable[str] | None = None,
only_running: bool = False,
session: Session = NEW_SESSION,
) -> dict[str, int]:
"""Get the number of active dag runs for each dag."""
query = select(cls.dag_id, func.count("*"))
if dag_ids is not None:
# 'set' called to avoid duplicate dag_ids, but converted back to 'list'
# because SQLAlchemy doesn't accept a set here.
query = query.where(cls.dag_id.in_(set(dag_ids)))
if only_running:
query = query.where(cls.state == DagRunState.RUNNING)
else:
query = query.where(cls.state.in_((DagRunState.RUNNING, DagRunState.QUEUED)))
query = query.group_by(cls.dag_id)
return {dag_id: count for dag_id, count in session.execute(query)}
@classmethod
def next_dagruns_to_examine(
cls,
state: DagRunState,
session: Session,
max_number: int | None = None,
) -> Query:
"""
Return the next DagRuns that the scheduler should attempt to schedule.
        This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
        query. You should ensure that any scheduling decisions are made in a single transaction, because as
        soon as the transaction is committed the rows will be unlocked.
"""
from airflow.models.dag import DagModel
if max_number is None:
max_number = cls.DEFAULT_DAGRUNS_TO_EXAMINE
# TODO: Bake this query, it is run _A lot_
query = (
select(cls)
.with_hint(cls, "USE INDEX (idx_dag_run_running_dags)", dialect_name="mysql")
.where(cls.state == state, cls.run_type != DagRunType.BACKFILL_JOB)
.join(DagModel, DagModel.dag_id == cls.dag_id)
.where(DagModel.is_paused == false(), DagModel.is_active == true())
)
if state == DagRunState.QUEUED:
# For dag runs in the queued state, we check if they have reached the max_active_runs limit
# and if so we drop them
running_drs = (
select(DagRun.dag_id, func.count(DagRun.state).label("num_running"))
.where(DagRun.state == DagRunState.RUNNING)
.group_by(DagRun.dag_id)
.subquery()
)
query = query.outerjoin(running_drs, running_drs.c.dag_id == DagRun.dag_id).where(
func.coalesce(running_drs.c.num_running, 0) < DagModel.max_active_runs
)
query = query.order_by(
nulls_first(cls.last_scheduling_decision, session=session),
cls.execution_date,
)
if not settings.ALLOW_FUTURE_EXEC_DATES:
query = query.where(DagRun.execution_date <= func.now())
return session.scalars(
with_row_locks(query.limit(max_number), of=cls, session=session, **skip_locked(session=session))
)
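    # Illustrative sketch (editor addition, not part of Airflow): the scheduler is
    # expected to consume this inside a single transaction so the row locks (SKIP
    # LOCKED where the database supports it) are held while decisions are made.
    # ``session`` is assumed to be an open ORM session:
    #
    #     for dag_run in DagRun.next_dagruns_to_examine(DagRunState.QUEUED, session):
    #         ...  # make scheduling decisions for dag_run
    #     session.commit()  # releases the row locks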
@classmethod
@provide_session
def find(
cls,
dag_id: str | list[str] | None = None,
run_id: Iterable[str] | None = None,
execution_date: datetime | Iterable[datetime] | None = None,
state: DagRunState | None = None,
external_trigger: bool | None = None,
no_backfills: bool = False,
run_type: DagRunType | None = None,
session: Session = NEW_SESSION,
execution_start_date: datetime | None = None,
execution_end_date: datetime | None = None,
) -> list[DagRun]:
"""
        Returns a list of DAG runs matching the given search criteria.
:param dag_id: the dag_id or list of dag_id to find dag runs for
:param run_id: defines the run id for this dag run
:param run_type: type of DagRun
:param execution_date: the execution date
:param state: the state of the dag run
:param external_trigger: whether this dag run is externally triggered
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:param session: database session
:param execution_start_date: dag run that was executed from this date
:param execution_end_date: dag run that was executed until this date
"""
qry = select(cls)
dag_ids = [dag_id] if isinstance(dag_id, str) else dag_id
if dag_ids:
qry = qry.where(cls.dag_id.in_(dag_ids))
if is_container(run_id):
qry = qry.where(cls.run_id.in_(run_id))
elif run_id is not None:
qry = qry.where(cls.run_id == run_id)
if is_container(execution_date):
qry = qry.where(cls.execution_date.in_(execution_date))
elif execution_date is not None:
qry = qry.where(cls.execution_date == execution_date)
if execution_start_date and execution_end_date:
qry = qry.where(cls.execution_date.between(execution_start_date, execution_end_date))
elif execution_start_date:
qry = qry.where(cls.execution_date >= execution_start_date)
elif execution_end_date:
qry = qry.where(cls.execution_date <= execution_end_date)
if state:
qry = qry.where(cls.state == state)
if external_trigger is not None:
qry = qry.where(cls.external_trigger == external_trigger)
if run_type:
qry = qry.where(cls.run_type == run_type)
if no_backfills:
qry = qry.where(cls.run_type != DagRunType.BACKFILL_JOB)
return session.scalars(qry.order_by(cls.execution_date)).all()
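    # Illustrative usage sketch (editor addition, not part of Airflow); "example_dag"
    # is a hypothetical dag_id:
    #
    #     runs = DagRun.find(
    #         dag_id="example_dag",
    #         state=DagRunState.RUNNING,
    #         no_backfills=True,
    #     )
    #     for run in runs:
    #         print(run.run_id, run.execution_date)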
@classmethod
@provide_session
def find_duplicate(
cls,
dag_id: str,
run_id: str,
execution_date: datetime,
session: Session = NEW_SESSION,
) -> DagRun | None:
"""
Return an existing run for the DAG with a specific run_id or execution_date.
*None* is returned if no such DAG run is found.
:param dag_id: the dag_id to find duplicates for
:param run_id: defines the run id for this dag run
:param execution_date: the execution date
:param session: database session
"""
return session.scalars(
select(cls).where(
cls.dag_id == dag_id,
or_(cls.run_id == run_id, cls.execution_date == execution_date),
)
).one_or_none()
@staticmethod
def generate_run_id(run_type: DagRunType, execution_date: datetime) -> str:
"""Generate Run ID based on Run Type and Execution Date."""
# _Ensure_ run_type is a DagRunType, not just a string from user code
return DagRunType(run_type).generate_run_id(execution_date)
@provide_session
def get_task_instances(
self,
state: Iterable[TaskInstanceState | None] | None = None,
session: Session = NEW_SESSION,
) -> list[TI]:
"""Returns the task instances for this dag run."""
tis = (
select(TI)
.options(joinedload(TI.dag_run))
.where(
TI.dag_id == self.dag_id,
TI.run_id == self.run_id,
)
)
if state:
if isinstance(state, str):
tis = tis.where(TI.state == state)
else:
# this is required to deal with NULL values
if None in state:
if all(x is None for x in state):
tis = tis.where(TI.state.is_(None))
else:
not_none_state = (s for s in state if s)
tis = tis.where(or_(TI.state.in_(not_none_state), TI.state.is_(None)))
else:
tis = tis.where(TI.state.in_(state))
if self.dag and self.dag.partial:
tis = tis.where(TI.task_id.in_(self.dag.task_ids))
return session.scalars(tis).all()
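    # Illustrative sketch (editor addition, not part of Airflow): ``None`` may be
    # mixed into ``state`` to also match task instances whose state is unset, with
    # ``dag_run`` assumed to be a DagRun instance:
    #
    #     tis = dag_run.get_task_instances(state=[None, TaskInstanceState.SCHEDULED])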
@provide_session
def get_task_instance(
self,
task_id: str,
session: Session = NEW_SESSION,
*,
map_index: int = -1,
) -> TI | None:
"""
Returns the task instance specified by task_id for this dag run.
:param task_id: the task id
:param session: Sqlalchemy ORM Session
"""
return session.scalars(
select(TI).filter_by(dag_id=self.dag_id, run_id=self.run_id, task_id=task_id, map_index=map_index)
).one_or_none()
def get_dag(self) -> DAG:
"""
Returns the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException(f"The DAG (.dag) for {self} needs to be set")
return self.dag
@provide_session
def get_previous_dagrun(
self, state: DagRunState | None = None, session: Session = NEW_SESSION
) -> DagRun | None:
"""The previous DagRun, if there is one."""
filters = [
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date,
]
if state is not None:
filters.append(DagRun.state == state)
return session.scalar(select(DagRun).where(*filters).order_by(DagRun.execution_date.desc()))
@provide_session
def get_previous_scheduled_dagrun(self, session: Session = NEW_SESSION) -> DagRun | None:
"""The previous, SCHEDULED DagRun, if there is one."""
return session.scalar(
select(DagRun)
.where(
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date,
DagRun.run_type != DagRunType.MANUAL,
)
.order_by(DagRun.execution_date.desc())
)
def _tis_for_dagrun_state(self, *, dag, tis):
"""
Return the collection of tasks that should be considered for evaluation of terminal dag run state.
Teardown tasks by default are not considered for the purpose of dag run state. But
users may enable such consideration with on_failure_fail_dagrun.
"""
def is_effective_leaf(task):
for down_task_id in task.downstream_task_ids:
down_task = dag.get_task(down_task_id)
if not down_task.is_teardown or down_task.on_failure_fail_dagrun:
# we found a down task that is not ignorable; not a leaf
return False
# we found no ignorable downstreams
# evaluate whether task is itself ignorable
return not task.is_teardown or task.on_failure_fail_dagrun
leaf_task_ids = {x.task_id for x in dag.tasks if is_effective_leaf(x)}
if not leaf_task_ids:
# can happen if dag is exclusively teardown tasks
leaf_task_ids = {x.task_id for x in dag.tasks if not x.downstream_list}
leaf_tis = {ti for ti in tis if ti.task_id in leaf_task_ids if ti.state != TaskInstanceState.REMOVED}
return leaf_tis
@provide_session
def update_state(
self, session: Session = NEW_SESSION, execute_callbacks: bool = True
) -> tuple[list[TI], DagCallbackRequest | None]:
"""
Determines the overall state of the DagRun based on the state
of its TaskInstances.
:param session: Sqlalchemy ORM Session
:param execute_callbacks: Should dag callbacks (success/failure, SLA etc.) be invoked
directly (default: true) or recorded as a pending request in the ``returned_callback`` property
:return: Tuple containing tis that can be scheduled in the current loop & `returned_callback` that
needs to be executed
"""
# Callback to execute in case of Task Failures
callback: DagCallbackRequest | None = None
class _UnfinishedStates(NamedTuple):
tis: Sequence[TI]
@classmethod
def calculate(cls, unfinished_tis: Sequence[TI]) -> _UnfinishedStates:
return cls(tis=unfinished_tis)
@property
def should_schedule(self) -> bool:
return (
bool(self.tis)
and all(not t.task.depends_on_past for t in self.tis)
and all(t.task.max_active_tis_per_dag is None for t in self.tis)
and all(t.task.max_active_tis_per_dagrun is None for t in self.tis)
and all(t.state != TaskInstanceState.DEFERRED for t in self.tis)
)
def recalculate(self) -> _UnfinishedStates:
return self._replace(tis=[t for t in self.tis if t.state in State.unfinished])
start_dttm = timezone.utcnow()
self.last_scheduling_decision = start_dttm
with Stats.timer(
f"dagrun.dependency-check.{self.dag_id}",
tags=self.stats_tags,
):
dag = self.get_dag()
info = self.task_instance_scheduling_decisions(session)
tis = info.tis
schedulable_tis = info.schedulable_tis
changed_tis = info.changed_tis
finished_tis = info.finished_tis
unfinished = _UnfinishedStates.calculate(info.unfinished_tis)
if unfinished.should_schedule:
are_runnable_tasks = schedulable_tis or changed_tis
# small speed up
if not are_runnable_tasks:
are_runnable_tasks, changed_by_upstream = self._are_premature_tis(
unfinished.tis, finished_tis, session
)
if changed_by_upstream: # Something changed, we need to recalculate!
unfinished = unfinished.recalculate()
tis_for_dagrun_state = self._tis_for_dagrun_state(dag=dag, tis=tis)
# if all tasks finished and at least one failed, the run failed
if not unfinished.tis and any(x.state in State.failed_states for x in tis_for_dagrun_state):
self.log.error("Marking run %s failed", self)
self.set_state(DagRunState.FAILED)
self.notify_dagrun_state_changed(msg="task_failure")
if execute_callbacks:
dag.handle_callback(self, success=False, reason="task_failure", session=session)
elif dag.has_on_failure_callback:
from airflow.models.dag import DagModel
dag_model = DagModel.get_dagmodel(dag.dag_id, session)
callback = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=self.dag_id,
run_id=self.run_id,
is_failure_callback=True,
processor_subdir=None if dag_model is None else dag_model.processor_subdir,
msg="task_failure",
)
# if all leaves succeeded and no unfinished tasks, the run succeeded
elif not unfinished.tis and all(x.state in State.success_states for x in tis_for_dagrun_state):
self.log.info("Marking run %s successful", self)
self.set_state(DagRunState.SUCCESS)
self.notify_dagrun_state_changed(msg="success")
if execute_callbacks:
dag.handle_callback(self, success=True, reason="success", session=session)
elif dag.has_on_success_callback:
from airflow.models.dag import DagModel
dag_model = DagModel.get_dagmodel(dag.dag_id, session)
callback = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=self.dag_id,
run_id=self.run_id,
is_failure_callback=False,
processor_subdir=None if dag_model is None else dag_model.processor_subdir,
msg="success",
)
# if *all tasks* are deadlocked, the run failed
elif unfinished.should_schedule and not are_runnable_tasks:
self.log.error("Task deadlock (no runnable tasks); marking run %s failed", self)
self.set_state(DagRunState.FAILED)
self.notify_dagrun_state_changed(msg="all_tasks_deadlocked")
if execute_callbacks:
dag.handle_callback(self, success=False, reason="all_tasks_deadlocked", session=session)
elif dag.has_on_failure_callback:
from airflow.models.dag import DagModel
dag_model = DagModel.get_dagmodel(dag.dag_id, session)
callback = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=self.dag_id,
run_id=self.run_id,
is_failure_callback=True,
processor_subdir=None if dag_model is None else dag_model.processor_subdir,
msg="all_tasks_deadlocked",
)
# finally, if the roots aren't done, the dag is still running
else:
self.set_state(DagRunState.RUNNING)
if self._state == DagRunState.FAILED or self._state == DagRunState.SUCCESS:
msg = (
"DagRun Finished: dag_id=%s, execution_date=%s, run_id=%s, "
"run_start_date=%s, run_end_date=%s, run_duration=%s, "
"state=%s, external_trigger=%s, run_type=%s, "
"data_interval_start=%s, data_interval_end=%s, dag_hash=%s"
)
self.log.info(
msg,
self.dag_id,
self.execution_date,
self.run_id,
self.start_date,
self.end_date,
(self.end_date - self.start_date).total_seconds()
if self.start_date and self.end_date
else None,
self._state,
self.external_trigger,
self.run_type,
self.data_interval_start,
self.data_interval_end,
self.dag_hash,
)
session.flush()
self._emit_true_scheduling_delay_stats_for_finished_state(finished_tis)
self._emit_duration_stats_for_finished_state()
session.merge(self)
        # We do not flush here for performance reasons (it increases the query count by +20)
return schedulable_tis, callback
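    # Illustrative sketch (editor addition, not part of Airflow): callers such as the
    # scheduler typically consume the return value like this, with ``dag_run`` and
    # ``session`` assumed to be available:
    #
    #     schedulable_tis, callback = dag_run.update_state(session=session, execute_callbacks=False)
    #     if schedulable_tis:
    #         dag_run.schedule_tis(schedulable_tis, session=session)
    #     if callback is not None:
    #         ...  # hand the DagCallbackRequest to the DAG processor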
@provide_session
def task_instance_scheduling_decisions(self, session: Session = NEW_SESSION) -> TISchedulingDecision:
tis = self.get_task_instances(session=session, state=State.task_states)
self.log.debug("number of tis tasks for %s: %s task(s)", self, len(tis))
def _filter_tis_and_exclude_removed(dag: DAG, tis: list[TI]) -> Iterable[TI]:
"""Populate ``ti.task`` while excluding those missing one, marking them as REMOVED."""
for ti in tis:
try:
ti.task = dag.get_task(ti.task_id)
except TaskNotFound:
if ti.state != TaskInstanceState.REMOVED:
self.log.error("Failed to get task for ti %s. Marking it as removed.", ti)
ti.state = TaskInstanceState.REMOVED
session.flush()
else:
yield ti
tis = list(_filter_tis_and_exclude_removed(self.get_dag(), tis))
unfinished_tis = [t for t in tis if t.state in State.unfinished]
finished_tis = [t for t in tis if t.state in State.finished]
if unfinished_tis:
schedulable_tis = [ut for ut in unfinished_tis if ut.state in SCHEDULEABLE_STATES]
self.log.debug("number of scheduleable tasks for %s: %s task(s)", self, len(schedulable_tis))
schedulable_tis, changed_tis, expansion_happened = self._get_ready_tis(
schedulable_tis,
finished_tis,
session=session,
)
# During expansion, we may change some tis into non-schedulable
# states, so we need to re-compute.
if expansion_happened:
changed_tis = True
new_unfinished_tis = [t for t in unfinished_tis if t.state in State.unfinished]
finished_tis.extend(t for t in unfinished_tis if t.state in State.finished)
unfinished_tis = new_unfinished_tis
else:
schedulable_tis = []
changed_tis = False
return TISchedulingDecision(
tis=tis,
schedulable_tis=schedulable_tis,
changed_tis=changed_tis,
unfinished_tis=unfinished_tis,
finished_tis=finished_tis,
)
def notify_dagrun_state_changed(self, msg: str = ""):
if self.state == DagRunState.RUNNING:
get_listener_manager().hook.on_dag_run_running(dag_run=self, msg=msg)
elif self.state == DagRunState.SUCCESS:
get_listener_manager().hook.on_dag_run_success(dag_run=self, msg=msg)
elif self.state == DagRunState.FAILED:
get_listener_manager().hook.on_dag_run_failed(dag_run=self, msg=msg)
# deliberately not notifying on QUEUED
# we can't get all the state changes on SchedulerJob, BackfillJob
# or LocalTaskJob, so we don't want to "falsely advertise" we notify about that
def _get_ready_tis(
self,
schedulable_tis: list[TI],
finished_tis: list[TI],
session: Session,
) -> tuple[list[TI], bool, bool]:
old_states = {}
ready_tis: list[TI] = []
changed_tis = False
if not schedulable_tis:
return ready_tis, changed_tis, False
# If we expand TIs, we need a new list so that we iterate over them too. (We can't alter
        # `schedulable_tis` in place and have the `for` loop pick them up.)
additional_tis: list[TI] = []
dep_context = DepContext(
flag_upstream_failed=True,
ignore_unmapped_tasks=True, # Ignore this Dep, as we will expand it if we can.
finished_tis=finished_tis,
)
def _expand_mapped_task_if_needed(ti: TI) -> Iterable[TI] | None:
"""Try to expand the ti, if needed.
If the ti needs expansion, newly created task instances are
returned as well as the original ti.
The original ti is also modified in-place and assigned the
``map_index`` of 0.
If the ti does not need expansion, either because the task is not
mapped, or has already been expanded, *None* is returned.
"""
if ti.map_index >= 0: # Already expanded, we're good.
return None
from airflow.models.mappedoperator import MappedOperator
if isinstance(ti.task, MappedOperator):
# If we get here, it could be that we are moving from non-mapped to mapped
# after task instance clearing or this ti is not yet expanded. Safe to clear
# the db references.
ti.clear_db_references(session=session)
try:
expanded_tis, _ = ti.task.expand_mapped_task(self.run_id, session=session)
except NotMapped: # Not a mapped task, nothing needed.
return None
if expanded_tis:
return expanded_tis
return ()
# Check dependencies.
expansion_happened = False
        # Set of task ids for which _revise_map_indexes_if_mapped has already been done
revised_map_index_task_ids = set()
for schedulable in itertools.chain(schedulable_tis, additional_tis):
old_state = schedulable.state
if not schedulable.are_dependencies_met(session=session, dep_context=dep_context):
old_states[schedulable.key] = old_state
continue
# If schedulable is not yet expanded, try doing it now. This is
# called in two places: First and ideally in the mini scheduler at
# the end of LocalTaskJob, and then as an "expansion of last resort"
# in the scheduler to ensure that the mapped task is correctly
# expanded before executed. Also see _revise_map_indexes_if_mapped
# docstring for additional information.
new_tis = None
if schedulable.map_index < 0:
new_tis = _expand_mapped_task_if_needed(schedulable)
if new_tis is not None:
additional_tis.extend(new_tis)
expansion_happened = True
if new_tis is None and schedulable.state in SCHEDULEABLE_STATES:
# It's enough to revise map index once per task id,
# checking the map index for each mapped task significantly slows down scheduling
if schedulable.task.task_id not in revised_map_index_task_ids:
ready_tis.extend(self._revise_map_indexes_if_mapped(schedulable.task, session=session))
revised_map_index_task_ids.add(schedulable.task.task_id)
ready_tis.append(schedulable)
# Check if any ti changed state
tis_filter = TI.filter_for_tis(old_states)
if tis_filter is not None:
fresh_tis = session.scalars(select(TI).where(tis_filter)).all()
changed_tis = any(ti.state != old_states[ti.key] for ti in fresh_tis)
return ready_tis, changed_tis, expansion_happened
def _are_premature_tis(
self,
unfinished_tis: Sequence[TI],
finished_tis: list[TI],
session: Session,
) -> tuple[bool, bool]:
dep_context = DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True,
finished_tis=finished_tis,
)
        # there might be runnable tasks that are up for retry and for some reason (retry delay, etc.) are
# not ready yet, so we set the flags to count them in
return (
any(ut.are_dependencies_met(dep_context=dep_context, session=session) for ut in unfinished_tis),
dep_context.have_changed_ti_states,
)
def _emit_true_scheduling_delay_stats_for_finished_state(self, finished_tis: list[TI]) -> None:
"""Emit the true scheduling delay stats.
The true scheduling delay stats is defined as the time when the first
task in DAG starts minus the expected DAG run datetime.
This helper method is used in ``update_state`` when the state of the
DAG run is updated to a completed status (either success or failure).
It finds the first started task within the DAG, calculates the run's
expected start time based on the logical date and timetable, and gets
the delay from the difference of these two values.
The emitted data may contain outliers (e.g. when the first task was
cleared, so the second task's start date will be used), but we can get
rid of the outliers on the stats side through dashboards tooling.
Note that the stat will only be emitted for scheduler-triggered DAG runs
(i.e. when ``external_trigger`` is *False*).
"""
if self.state == TaskInstanceState.RUNNING:
return
if self.external_trigger:
return
if not finished_tis:
return
try:
dag = self.get_dag()
if not dag.timetable.periodic:
# We can't emit this metric if there is no following schedule to calculate from!
return
try:
first_start_date = min(ti.start_date for ti in finished_tis if ti.start_date)
except ValueError: # No start dates at all.
pass
else:
# TODO: Logically, this should be DagRunInfo.run_after, but the
# information is not stored on a DagRun, only before the actual
# execution on DagModel.next_dagrun_create_after. We should add
# a field on DagRun for this instead of relying on the run
# always happening immediately after the data interval.
data_interval_end = dag.get_run_data_interval(self).end
true_delay = first_start_date - data_interval_end
if true_delay.total_seconds() > 0:
Stats.timing(
f"dagrun.{dag.dag_id}.first_task_scheduling_delay", true_delay, tags=self.stats_tags
)
Stats.timing("dagrun.first_task_scheduling_delay", true_delay, tags=self.stats_tags)
except Exception:
self.log.warning("Failed to record first_task_scheduling_delay metric:", exc_info=True)
def _emit_duration_stats_for_finished_state(self):
if self.state == DagRunState.RUNNING:
return
if self.start_date is None:
self.log.warning("Failed to record duration of %s: start_date is not set.", self)
return
if self.end_date is None:
self.log.warning("Failed to record duration of %s: end_date is not set.", self)
return
duration = self.end_date - self.start_date
timer_params = {"dt": duration, "tags": self.stats_tags}
Stats.timing(f"dagrun.duration.{self.state.value}.{self.dag_id}", **timer_params)
Stats.timing(f"dagrun.duration.{self.state.value}", **timer_params)
@provide_session
def verify_integrity(self, *, session: Session = NEW_SESSION) -> None:
"""
        Verifies the DagRun by checking for removed tasks or tasks that are not in the
        database yet. It will set state to removed or add the task if required.
        :param session: Sqlalchemy ORM Session
"""
from airflow.settings import task_instance_mutation_hook
# Set for the empty default in airflow.settings -- if it's not set this means it has been changed
# Note: Literal[True, False] instead of bool because otherwise it doesn't correctly find the overload.
hook_is_noop: Literal[True, False] = getattr(task_instance_mutation_hook, "is_noop", False)
dag = self.get_dag()
task_ids = self._check_for_removed_or_restored_tasks(
dag, task_instance_mutation_hook, session=session
)
def task_filter(task: Operator) -> bool:
return task.task_id not in task_ids and (
self.is_backfill
or task.start_date <= self.execution_date
and (task.end_date is None or self.execution_date <= task.end_date)
)
created_counts: dict[str, int] = defaultdict(int)
task_creator = self._get_task_creator(created_counts, task_instance_mutation_hook, hook_is_noop)
# Create the missing tasks, including mapped tasks
tasks_to_create = (task for task in dag.task_dict.values() if task_filter(task))
tis_to_create = self._create_tasks(tasks_to_create, task_creator, session=session)
self._create_task_instances(self.dag_id, tis_to_create, created_counts, hook_is_noop, session=session)
def _check_for_removed_or_restored_tasks(
self, dag: DAG, ti_mutation_hook, *, session: Session
) -> set[str]:
"""
        Check for removed, restored, or missing tasks.
:param dag: DAG object corresponding to the dagrun
:param ti_mutation_hook: task_instance_mutation_hook function
:param session: Sqlalchemy ORM Session
:return: Task IDs in the DAG run
"""
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = set()
for ti in tis:
ti_mutation_hook(ti)
task_ids.add(ti.task_id)
try:
task = dag.get_task(ti.task_id)
should_restore_task = (task is not None) and ti.state == TaskInstanceState.REMOVED
if should_restore_task:
self.log.info("Restoring task '%s' which was previously removed from DAG '%s'", ti, dag)
Stats.incr(f"task_restored_to_dag.{dag.dag_id}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("task_restored_to_dag", tags={**self.stats_tags, "dag_id": dag.dag_id})
ti.state = None
except AirflowException:
if ti.state == TaskInstanceState.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state != DagRunState.RUNNING and not dag.partial:
self.log.warning("Failed to get task '%s' for dag '%s'. Marking it as removed.", ti, dag)
Stats.incr(f"task_removed_from_dag.{dag.dag_id}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("task_removed_from_dag", tags={**self.stats_tags, "dag_id": dag.dag_id})
ti.state = TaskInstanceState.REMOVED
continue
try:
num_mapped_tis = task.get_parse_time_mapped_ti_count()
except NotMapped:
continue
except NotFullyPopulated:
# What if it is _now_ dynamically mapped, but wasn't before?
try:
total_length = task.get_mapped_ti_count(self.run_id, session=session)
except NotFullyPopulated:
# Not all upstreams finished, so we can't tell what should be here. Remove everything.
if ti.map_index >= 0:
self.log.debug(
"Removing the unmapped TI '%s' as the mapping can't be resolved yet", ti
)
ti.state = TaskInstanceState.REMOVED
continue
# Upstreams finished, check there aren't any extras
if ti.map_index >= total_length:
self.log.debug(
"Removing task '%s' as the map_index is longer than the resolved mapping list (%d)",
ti,
total_length,
)
ti.state = TaskInstanceState.REMOVED
else:
# Check if the number of mapped literals has changed, and we need to mark this TI as removed.
if ti.map_index >= num_mapped_tis:
self.log.debug(
"Removing task '%s' as the map_index is longer than the literal mapping list (%s)",
ti,
num_mapped_tis,
)
ti.state = TaskInstanceState.REMOVED
elif ti.map_index < 0:
self.log.debug("Removing the unmapped TI '%s' as the mapping can now be performed", ti)
ti.state = TaskInstanceState.REMOVED
return task_ids
@overload
def _get_task_creator(
self,
created_counts: dict[str, int],
ti_mutation_hook: Callable,
hook_is_noop: Literal[True],
) -> Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]]]:
...
@overload
def _get_task_creator(
self,
created_counts: dict[str, int],
ti_mutation_hook: Callable,
hook_is_noop: Literal[False],
) -> Callable[[Operator, Iterable[int]], Iterator[TI]]:
...
def _get_task_creator(
self,
created_counts: dict[str, int],
ti_mutation_hook: Callable,
hook_is_noop: Literal[True, False],
) -> Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]] | Iterator[TI]]:
"""
Get the task creator function.
This function also updates the created_counts dictionary with the number of tasks created.
:param created_counts: Dictionary of task_type -> count of created TIs
:param ti_mutation_hook: task_instance_mutation_hook function
:param hook_is_noop: Whether the task_instance_mutation_hook is a noop
"""
if hook_is_noop:
def create_ti_mapping(task: Operator, indexes: Iterable[int]) -> Iterator[dict[str, Any]]:
created_counts[task.task_type] += 1
for map_index in indexes:
yield TI.insert_mapping(self.run_id, task, map_index=map_index)
creator = create_ti_mapping
else:
def create_ti(task: Operator, indexes: Iterable[int]) -> Iterator[TI]:
for map_index in indexes:
ti = TI(task, run_id=self.run_id, map_index=map_index)
ti_mutation_hook(ti)
created_counts[ti.operator] += 1
yield ti
creator = create_ti
return creator
def _create_tasks(
self,
tasks: Iterable[Operator],
task_creator: TaskCreator,
*,
session: Session,
) -> CreatedTasks:
"""
        Create missing tasks and expand any MappedOperator whose input consists only of literals.
:param tasks: Tasks to create jobs for in the DAG run
:param task_creator: Function to create task instances
"""
map_indexes: Iterable[int]
for task in tasks:
try:
count = task.get_mapped_ti_count(self.run_id, session=session)
except (NotMapped, NotFullyPopulated):
map_indexes = (-1,)
else:
if count:
map_indexes = range(count)
else:
# Make sure to always create at least one ti; this will be
# marked as REMOVED later at runtime.
map_indexes = (-1,)
yield from task_creator(task, map_indexes)
def _create_task_instances(
self,
dag_id: str,
tasks: Iterator[dict[str, Any]] | Iterator[TI],
created_counts: dict[str, int],
hook_is_noop: bool,
*,
session: Session,
) -> None:
"""
Create the necessary task instances from the given tasks.
:param dag_id: DAG ID associated with the dagrun
:param tasks: the tasks to create the task instances from
        :param created_counts: a dictionary of task type -> count of task instances created by the task creator
:param hook_is_noop: whether the task_instance_mutation_hook is noop
:param session: the session to use
"""
# Fetch the information we need before handling the exception to avoid
# PendingRollbackError due to the session being invalidated on exception
# see https://github.com/apache/superset/pull/530
run_id = self.run_id
try:
if hook_is_noop:
session.bulk_insert_mappings(TI, tasks)
else:
session.bulk_save_objects(tasks)
for task_type, count in created_counts.items():
Stats.incr(f"task_instance_created_{task_type}", count, tags=self.stats_tags)
# Same metric with tagging
Stats.incr("task_instance_created", count, tags={**self.stats_tags, "task_type": task_type})
session.flush()
except IntegrityError:
self.log.info(
"Hit IntegrityError while creating the TIs for %s- %s",
dag_id,
run_id,
exc_info=True,
)
self.log.info("Doing session rollback.")
# TODO[HA]: We probably need to savepoint this so we can keep the transaction alive.
session.rollback()
def _revise_map_indexes_if_mapped(self, task: Operator, *, session: Session) -> Iterator[TI]:
"""Check if task increased or reduced in length and handle appropriately.
Task instances that do not already exist are created and returned if
possible. Expansion only happens if all upstreams are ready; otherwise
we delay expansion to the "last resort". See comments at the call site
for more details.
"""
from airflow.settings import task_instance_mutation_hook
try:
total_length = task.get_mapped_ti_count(self.run_id, session=session)
except NotMapped:
return # Not a mapped task, don't need to do anything.
except NotFullyPopulated:
return # Upstreams not ready, don't need to revise this yet.
query = session.scalars(
select(TI.map_index).where(
TI.dag_id == self.dag_id,
TI.task_id == task.task_id,
TI.run_id == self.run_id,
)
)
existing_indexes = {i for i in query}
removed_indexes = existing_indexes.difference(range(total_length))
if removed_indexes:
session.execute(
update(TI)
.where(
TI.dag_id == self.dag_id,
TI.task_id == task.task_id,
TI.run_id == self.run_id,
TI.map_index.in_(removed_indexes),
)
.values(state=TaskInstanceState.REMOVED)
)
session.flush()
for index in range(total_length):
if index in existing_indexes:
continue
ti = TI(task, run_id=self.run_id, map_index=index, state=None)
self.log.debug("Expanding TIs upserted %s", ti)
task_instance_mutation_hook(ti)
ti = session.merge(ti)
ti.refresh_from_task(task)
session.flush()
yield ti
@staticmethod
def get_run(session: Session, dag_id: str, execution_date: datetime) -> DagRun | None:
"""
Get a single DAG Run.
:meta private:
:param session: Sqlalchemy ORM Session
:param dag_id: DAG ID
:param execution_date: execution date
:return: DagRun corresponding to the given dag_id and execution date
if one exists. None otherwise.
"""
warnings.warn(
"This method is deprecated. Please use SQLAlchemy directly",
RemovedInAirflow3Warning,
stacklevel=2,
)
return session.scalar(
select(DagRun).where(
DagRun.dag_id == dag_id,
DagRun.external_trigger == False, # noqa
DagRun.execution_date == execution_date,
)
)
@property
def is_backfill(self) -> bool:
return self.run_type == DagRunType.BACKFILL_JOB
@classmethod
@provide_session
def get_latest_runs(cls, session: Session = NEW_SESSION) -> list[DagRun]:
"""Returns the latest DagRun for each DAG."""
subquery = (
select(cls.dag_id, func.max(cls.execution_date).label("execution_date"))
.group_by(cls.dag_id)
.subquery()
)
return session.scalars(
select(cls).join(
subquery,
and_(cls.dag_id == subquery.c.dag_id, cls.execution_date == subquery.c.execution_date),
)
).all()
@provide_session
def schedule_tis(
self,
schedulable_tis: Iterable[TI],
session: Session = NEW_SESSION,
max_tis_per_query: int | None = None,
) -> int:
"""
        Set the given task instances to the scheduled state.
        Each element of ``schedulable_tis`` should have its ``task`` attribute already set.
        Any EmptyOperator without callbacks or outlets is instead set straight to the success state.
        All the TIs should belong to this DagRun; because this code is on the hot path, that is not
        checked, and it is the caller's responsibility to pass in only TIs from a single dag run.
"""
# Get the list of TI IDs that do not need to be executed; these are
# tasks using EmptyOperator and without on_execute_callback / on_success_callback
dummy_ti_ids = []
schedulable_ti_ids = []
for ti in schedulable_tis:
if (
ti.task.inherits_from_empty_operator
and not ti.task.on_execute_callback
and not ti.task.on_success_callback
and not ti.task.outlets
):
dummy_ti_ids.append((ti.task_id, ti.map_index))
else:
schedulable_ti_ids.append((ti.task_id, ti.map_index))
count = 0
if schedulable_ti_ids:
schedulable_ti_ids_chunks = chunks(
schedulable_ti_ids, max_tis_per_query or len(schedulable_ti_ids)
)
for schedulable_ti_ids_chunk in schedulable_ti_ids_chunks:
count += session.execute(
update(TI)
.where(
TI.dag_id == self.dag_id,
TI.run_id == self.run_id,
tuple_in_condition((TI.task_id, TI.map_index), schedulable_ti_ids_chunk),
)
.values(state=TaskInstanceState.SCHEDULED)
.execution_options(synchronize_session=False)
).rowcount
# Tasks using EmptyOperator should not be executed, mark them as success
if dummy_ti_ids:
dummy_ti_ids_chunks = chunks(dummy_ti_ids, max_tis_per_query or len(dummy_ti_ids))
for dummy_ti_ids_chunk in dummy_ti_ids_chunks:
count += session.execute(
update(TI)
.where(
TI.dag_id == self.dag_id,
TI.run_id == self.run_id,
tuple_in_condition((TI.task_id, TI.map_index), dummy_ti_ids_chunk),
)
.values(
state=TaskInstanceState.SUCCESS,
start_date=timezone.utcnow(),
end_date=timezone.utcnow(),
duration=0,
)
.execution_options(
synchronize_session=False,
)
).rowcount
return count
@provide_session
def get_log_template(self, *, session: Session = NEW_SESSION) -> LogTemplate:
if self.log_template_id is None: # DagRun created before LogTemplate introduction.
template = session.scalar(select(LogTemplate).order_by(LogTemplate.id))
else:
template = session.get(LogTemplate, self.log_template_id)
if template is None:
raise AirflowException(
f"No log_template entry found for ID {self.log_template_id!r}. "
f"Please make sure you set up the metadatabase correctly."
)
return template
@provide_session
def get_log_filename_template(self, *, session: Session = NEW_SESSION) -> str:
warnings.warn(
"This method is deprecated. Please use get_log_template instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_log_template(session=session).filename
class DagRunNote(Base):
"""For storage of arbitrary notes concerning the dagrun instance."""
__tablename__ = "dag_run_note"
user_id = Column(Integer, nullable=True)
dag_run_id = Column(Integer, primary_key=True, nullable=False)
content = Column(String(1000).with_variant(Text(1000), "mysql"))
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False)
dag_run = relationship("DagRun", back_populates="dag_run_note")
__table_args__ = (
PrimaryKeyConstraint("dag_run_id", name="dag_run_note_pkey"),
ForeignKeyConstraint(
(dag_run_id,),
["dag_run.id"],
name="dag_run_note_dr_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
(user_id,),
["ab_user.id"],
name="dag_run_note_user_fkey",
),
)
def __init__(self, content, user_id=None):
self.content = content
self.user_id = user_id
def __repr__(self):
prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.dagrun_id} {self.run_id}"
if self.map_index != -1:
prefix += f" map_index={self.map_index}"
return prefix + ">"
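# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ``DagRun.schedule_tis`` above batches (task_id, map_index) keys so that each UPDATE touches at
# most ``max_tis_per_query`` rows. The helper below mirrors that chunking in isolation; the names
# ``_chunked_keys`` and ``_example_keys`` are hypothetical.
def _chunked_keys(keys, size):
    """Yield successive slices of at most ``size`` keys, like the per-query chunks above."""
    for start in range(0, len(keys), size):
        yield keys[start : start + size]


# Example: five keys with a batch size of 2 produce batches of 2, 2 and 1 keys.
_example_keys = [("extract", -1), ("load", 0), ("load", 1), ("load", 2), ("report", -1)]
assert [len(chunk) for chunk in _chunked_keys(_example_keys, 2)] == [2, 2, 1]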
| 59,506 | 40.124395 | 110 |
py
|
airflow
|
airflow-main/airflow/models/param.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import copy
import datetime
import json
import logging
import warnings
from typing import TYPE_CHECKING, Any, ClassVar, ItemsView, Iterable, MutableMapping, ValuesView
from pendulum.parsing import parse_iso8601
from airflow.exceptions import AirflowException, ParamValidationError, RemovedInAirflow3Warning
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.mixins import ResolveMixin
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.models.operator import Operator
logger = logging.getLogger(__name__)
class Param:
"""
Class to hold the default value of a Param and the rule set used to validate it. Without the rule
set it always validates and returns the default value.
:param default: The value this Param object holds
:param description: Optional help text for the Param
:param schema: The validation schema of the Param, if not given then all kwargs except
default & description will form the schema
"""
__version__: ClassVar[int] = 1
CLASS_IDENTIFIER = "__class"
def __init__(self, default: Any = NOTSET, description: str | None = None, **kwargs):
if default is not NOTSET:
self._warn_if_not_json(default)
self.value = default
self.description = description
self.schema = kwargs.pop("schema") if "schema" in kwargs else kwargs
def __copy__(self) -> Param:
return Param(self.value, self.description, schema=self.schema)
@staticmethod
def _warn_if_not_json(value):
try:
json.dumps(value)
except Exception:
warnings.warn(
"The use of non-json-serializable params is deprecated and will be removed in "
"a future release",
RemovedInAirflow3Warning,
)
@staticmethod
def _warn_if_not_rfc3339_dt(value):
"""Fallback to iso8601 datetime validation if rfc3339 failed."""
try:
iso8601_value = parse_iso8601(value)
except Exception:
return None
if not isinstance(iso8601_value, datetime.datetime):
return None
warnings.warn(
f"The use of non-RFC3339 datetime: {value!r} is deprecated "
"and will be removed in a future release",
RemovedInAirflow3Warning,
)
if timezone.is_naive(iso8601_value):
warnings.warn(
"The use naive datetime is deprecated and will be removed in a future release",
RemovedInAirflow3Warning,
)
return value
def resolve(self, value: Any = NOTSET, suppress_exception: bool = False) -> Any:
"""
Runs the validations and returns the Param's final value.
May raise ParamValidationError on failed validation, including when no value
is passed and no default value exists.
We first check that the value is json-serializable; if not, warn.
In a future release we will require the value to be json-serializable.
:param value: The value to be updated for the Param
:param suppress_exception: Whether to suppress the exception when validation fails.
If true and validation fails, the return value is None.
"""
import jsonschema
from jsonschema import FormatChecker
from jsonschema.exceptions import ValidationError
if value is not NOTSET:
self._warn_if_not_json(value)
final_val = value if value is not NOTSET else self.value
if isinstance(final_val, ArgNotSet):
if suppress_exception:
return None
raise ParamValidationError("No value passed and Param has no default value")
try:
jsonschema.validate(final_val, self.schema, format_checker=FormatChecker())
except ValidationError as err:
if err.schema.get("format") == "date-time":
rfc3339_value = self._warn_if_not_rfc3339_dt(final_val)
if rfc3339_value:
self.value = rfc3339_value
return rfc3339_value
if suppress_exception:
return None
raise ParamValidationError(err) from None
self.value = final_val
return final_val
def dump(self) -> dict:
"""Dump the Param as a dictionary."""
out_dict = {self.CLASS_IDENTIFIER: f"{self.__module__}.{self.__class__.__name__}"}
out_dict.update(self.__dict__)
return out_dict
@property
def has_value(self) -> bool:
return self.value is not NOTSET
def serialize(self) -> dict:
return {"value": self.value, "description": self.description, "schema": self.schema}
@staticmethod
def deserialize(data: dict[str, Any], version: int) -> Param:
if version > Param.__version__:
raise TypeError("serialized version > class version")
return Param(default=data["value"], description=data["description"], schema=data["schema"])
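# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal example of a ``Param`` carrying a JSON-schema rule set: the extra kwargs form the
# schema and ``resolve()`` validates against it. The values are made up for illustration.
def _example_param_usage() -> int:
    retries = Param(3, description="How many times to retry", type="integer", minimum=0)
    assert retries.resolve() == 3  # no value passed -> the default is validated and returned
    assert retries.resolve(5) == 5  # a valid override replaces the stored value
    assert retries.resolve("x", suppress_exception=True) is None  # invalid value + suppression -> None
    return retries.value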
class ParamsDict(MutableMapping[str, Any]):
"""
Class to hold all params for dags or tasks. All keys are strictly strings, and values
are converted into Param objects if they are not already. This class implicitly replaces the
params dictionary and ideally does not need to be used directly.
"""
__version__: ClassVar[int] = 1
__slots__ = ["__dict", "suppress_exception"]
def __init__(self, dict_obj: MutableMapping | None = None, suppress_exception: bool = False):
"""
:param dict_obj: A dict or dict like object to init ParamsDict
:param suppress_exception: Flag to suppress value exceptions while initializing the ParamsDict
"""
params_dict: dict[str, Param] = {}
dict_obj = dict_obj or {}
for k, v in dict_obj.items():
if not isinstance(v, Param):
params_dict[k] = Param(v)
else:
params_dict[k] = v
self.__dict = params_dict
self.suppress_exception = suppress_exception
def __bool__(self) -> bool:
return bool(self.__dict)
def __eq__(self, other: Any) -> bool:
if isinstance(other, ParamsDict):
return self.dump() == other.dump()
if isinstance(other, dict):
return self.dump() == other
return NotImplemented
def __copy__(self) -> ParamsDict:
return ParamsDict(self.__dict, self.suppress_exception)
def __deepcopy__(self, memo: dict[int, Any] | None) -> ParamsDict:
return ParamsDict(copy.deepcopy(self.__dict, memo), self.suppress_exception)
def __contains__(self, o: object) -> bool:
return o in self.__dict
def __len__(self) -> int:
return len(self.__dict)
def __delitem__(self, v: str) -> None:
del self.__dict[v]
def __iter__(self):
return iter(self.__dict)
def __repr__(self):
return repr(self.dump())
def __setitem__(self, key: str, value: Any) -> None:
"""
Override for dictionary's ``setitem`` method. This method makes sure that all values are
stored as Param objects.
:param key: A key which needs to be inserted or updated in the dict
:param value: A value which needs to be set against the key. It could be of any
type but will be converted and stored as a Param object eventually.
"""
if isinstance(value, Param):
param = value
elif key in self.__dict:
param = self.__dict[key]
try:
param.resolve(value=value, suppress_exception=self.suppress_exception)
except ParamValidationError as ve:
raise ParamValidationError(f"Invalid input for param {key}: {ve}") from None
else:
# if the key isn't there already and if the value isn't of Param type create a new Param object
param = Param(value)
self.__dict[key] = param
def __getitem__(self, key: str) -> Any:
"""
Override for dictionary's ``getitem`` method. After fetching the key, it would call the
resolve method as well on the Param object.
:param key: The key to fetch
"""
param = self.__dict[key]
return param.resolve(suppress_exception=self.suppress_exception)
def get_param(self, key: str) -> Param:
"""Get the internal :class:`.Param` object for this key."""
return self.__dict[key]
def items(self):
return ItemsView(self.__dict)
def values(self):
return ValuesView(self.__dict)
def update(self, *args, **kwargs) -> None:
if len(args) == 1 and not kwargs and isinstance(args[0], ParamsDict):
return super().update(args[0].__dict)
super().update(*args, **kwargs)
def dump(self) -> dict[str, Any]:
"""Dumps the ParamsDict object as a dictionary, while suppressing exceptions."""
return {k: v.resolve(suppress_exception=True) for k, v in self.items()}
def validate(self) -> dict[str, Any]:
"""Validates & returns all the Params object stored in the dictionary."""
resolved_dict = {}
try:
for k, v in self.items():
resolved_dict[k] = v.resolve(suppress_exception=self.suppress_exception)
except ParamValidationError as ve:
raise ParamValidationError(f"Invalid input for param {k}: {ve}") from None
return resolved_dict
def serialize(self) -> dict[str, Any]:
return self.dump()
@staticmethod
def deserialize(data: dict, version: int) -> ParamsDict:
if version > ParamsDict.__version__:
raise TypeError("serialized version > class version")
return ParamsDict(data)
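# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ``ParamsDict`` wraps plain values in ``Param`` objects on assignment and resolves them on access.
# The keys and values below are made up for illustration.
def _example_params_dict_usage() -> dict:
    params = ParamsDict({"env": "dev", "retries": Param(1, type="integer")})
    params["env"] = "prod"  # re-validated against the existing Param for this key
    assert params["retries"] == 1  # __getitem__ resolves the stored Param
    return params.dump()  # -> {"env": "prod", "retries": 1}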
class DagParam(ResolveMixin):
"""DAG run parameter reference.
This binds a simple Param object to a name within a DAG instance, so that it
can be resolved at runtime via the ``{{ context }}`` dictionary. The
ideal use case of this class is to implicitly convert args passed to a
method decorated by ``@dag``.
It can be used to parameterize a DAG. You can overwrite its value by setting
it on conf when you trigger your DagRun.
This can also be used in templates by accessing ``{{ context.params }}``.
**Example**:
with DAG(...) as dag:
EmailOperator(subject=dag.param('subject', 'Hi from Airflow!'))
:param current_dag: Dag being used for parameter.
:param name: key value which is used to set the parameter
:param default: Default value used if no parameter was set.
"""
def __init__(self, current_dag: DAG, name: str, default: Any = NOTSET):
if default is not NOTSET:
current_dag.params[name] = default
self._name = name
self._default = default
def iter_references(self) -> Iterable[tuple[Operator, str]]:
return ()
def resolve(self, context: Context) -> Any:
"""Pull DagParam value from DagRun context. This method is run during ``op.execute()``."""
with contextlib.suppress(KeyError):
return context["dag_run"].conf[self._name]
if self._default is not NOTSET:
return self._default
with contextlib.suppress(KeyError):
return context["params"][self._name]
raise AirflowException(f"No value could be resolved for parameter {self._name}")
def process_params(
dag: DAG,
task: Operator,
dag_run: DagRun | None,
*,
suppress_exception: bool,
) -> dict[str, Any]:
"""Merge, validate params, and convert them into a simple dict."""
from airflow.configuration import conf
params = ParamsDict(suppress_exception=suppress_exception)
with contextlib.suppress(AttributeError):
params.update(dag.params)
if task.params:
params.update(task.params)
if conf.getboolean("core", "dag_run_conf_overrides_params") and dag_run and dag_run.conf:
logger.debug("Updating task params (%s) with DagRun.conf (%s)", params, dag_run.conf)
params.update(dag_run.conf)
return params.validate()
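# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ``process_params`` layers parameters in the order DAG -> task -> dag_run.conf, the last one only
# when ``core.dag_run_conf_overrides_params`` is enabled. The plain dicts below only mirror that
# precedence; the keys and values are made up and no Airflow objects are involved.
def _example_param_precedence() -> dict:
    dag_level = {"env": "dev", "retries": 1}
    task_level = {"retries": 3}
    dag_run_conf = {"env": "prod"}
    merged = {**dag_level, **task_level, **dag_run_conf}
    assert merged == {"env": "prod", "retries": 3}
    return merged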
| 13,113 | 36.468571 | 107 |
py
|
airflow
|
airflow-main/airflow/models/expandinput.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections.abc
import functools
import operator
from typing import TYPE_CHECKING, Any, Dict, Iterable, Mapping, NamedTuple, Sequence, Sized, Union
import attr
from airflow.typing_compat import TypeGuard
from airflow.utils.context import Context
from airflow.utils.mixins import ResolveMixin
from airflow.utils.session import NEW_SESSION, provide_session
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from airflow.models.operator import Operator
from airflow.models.xcom_arg import XComArg
ExpandInput = Union["DictOfListsExpandInput", "ListOfDictsExpandInput"]
# Each keyword argument to expand() can be an XComArg, sequence, or dict (not
# any mapping since we need the value to be ordered).
OperatorExpandArgument = Union["MappedArgument", "XComArg", Sequence, Dict[str, Any]]
# The single argument of expand_kwargs() can be an XComArg, or a list with each
# element being either an XComArg or a dict.
OperatorExpandKwargsArgument = Union["XComArg", Sequence[Union["XComArg", Mapping[str, Any]]]]
@attr.define(kw_only=True)
class MappedArgument(ResolveMixin):
"""Stand-in stub for task-group-mapping arguments.
This is very similar to an XComArg, but resolved differently. Declared here
(instead of in the task group module) to avoid import cycles.
"""
_input: ExpandInput
_key: str
def get_task_map_length(self, run_id: str, *, session: Session) -> int | None:
# TODO (AIP-42): Implement run-time task map length inspection. This is
# needed when we implement task mapping inside a mapped task group.
raise NotImplementedError()
def iter_references(self) -> Iterable[tuple[Operator, str]]:
yield from self._input.iter_references()
@provide_session
def resolve(self, context: Context, *, session: Session = NEW_SESSION) -> Any:
data, _ = self._input.resolve(context, session=session)
return data[self._key]
# To replace tedious isinstance() checks.
def is_mappable(v: Any) -> TypeGuard[OperatorExpandArgument]:
from airflow.models.xcom_arg import XComArg
return isinstance(v, (MappedArgument, XComArg, Mapping, Sequence)) and not isinstance(v, str)
# To replace tedious isinstance() checks.
def _is_parse_time_mappable(v: OperatorExpandArgument) -> TypeGuard[Mapping | Sequence]:
from airflow.models.xcom_arg import XComArg
return not isinstance(v, (MappedArgument, XComArg))
# To replace tedious isinstance() checks.
def _needs_run_time_resolution(v: OperatorExpandArgument) -> TypeGuard[MappedArgument | XComArg]:
from airflow.models.xcom_arg import XComArg
return isinstance(v, (MappedArgument, XComArg))
class NotFullyPopulated(RuntimeError):
"""Raise when ``get_map_lengths`` cannot populate all mapping metadata.
This generally happens because not all upstream tasks have finished when
the function is called.
"""
def __init__(self, missing: set[str]) -> None:
self.missing = missing
def __str__(self) -> str:
keys = ", ".join(repr(k) for k in sorted(self.missing))
return f"Failed to populate all mapping metadata; missing: {keys}"
class DictOfListsExpandInput(NamedTuple):
"""Storage type of a mapped operator's mapped kwargs.
This is created from ``expand(**kwargs)``.
"""
value: dict[str, OperatorExpandArgument]
def _iter_parse_time_resolved_kwargs(self) -> Iterable[tuple[str, Sized]]:
"""Generate kwargs with values available on parse-time."""
return ((k, v) for k, v in self.value.items() if _is_parse_time_mappable(v))
def get_parse_time_mapped_ti_count(self) -> int:
if not self.value:
return 0
literal_values = [len(v) for _, v in self._iter_parse_time_resolved_kwargs()]
if len(literal_values) != len(self.value):
literal_keys = (k for k, _ in self._iter_parse_time_resolved_kwargs())
raise NotFullyPopulated(set(self.value).difference(literal_keys))
return functools.reduce(operator.mul, literal_values, 1)
def _get_map_lengths(self, run_id: str, *, session: Session) -> dict[str, int]:
"""Return dict of argument name to map length.
If any arguments are not known right now (upstream task not finished),
they will not be present in the dict.
"""
# TODO: This initiates one database call for each XComArg. Would it be
# more efficient to do one single db call and unpack the value here?
def _get_length(v: OperatorExpandArgument) -> int | None:
if _needs_run_time_resolution(v):
return v.get_task_map_length(run_id, session=session)
# Unfortunately a user-defined TypeGuard cannot apply negative type
# narrowing. https://github.com/python/typing/discussions/1013
if TYPE_CHECKING:
assert isinstance(v, Sized)
return len(v)
map_lengths_iterator = ((k, _get_length(v)) for k, v in self.value.items())
map_lengths = {k: v for k, v in map_lengths_iterator if v is not None}
if len(map_lengths) < len(self.value):
raise NotFullyPopulated(set(self.value).difference(map_lengths))
return map_lengths
def get_total_map_length(self, run_id: str, *, session: Session) -> int:
if not self.value:
return 0
lengths = self._get_map_lengths(run_id, session=session)
return functools.reduce(operator.mul, (lengths[name] for name in self.value), 1)
def _expand_mapped_field(self, key: str, value: Any, context: Context, *, session: Session) -> Any:
if _needs_run_time_resolution(value):
value = value.resolve(context, session=session)
map_index = context["ti"].map_index
if map_index < 0:
raise RuntimeError("can't resolve task-mapping argument without expanding")
all_lengths = self._get_map_lengths(context["run_id"], session=session)
def _find_index_for_this_field(index: int) -> int:
# Need to use the original user input to retain argument order.
for mapped_key in reversed(list(self.value)):
mapped_length = all_lengths[mapped_key]
if mapped_length < 1:
raise RuntimeError(f"cannot expand field mapped to length {mapped_length!r}")
if mapped_key == key:
return index % mapped_length
index //= mapped_length
return -1
found_index = _find_index_for_this_field(map_index)
if found_index < 0:
return value
if isinstance(value, collections.abc.Sequence):
return value[found_index]
if not isinstance(value, dict):
raise TypeError(f"can't map over value of type {type(value)}")
for i, (k, v) in enumerate(value.items()):
if i == found_index:
return k, v
raise IndexError(f"index {map_index} is over mapped length")
def iter_references(self) -> Iterable[tuple[Operator, str]]:
from airflow.models.xcom_arg import XComArg
for x in self.value.values():
if isinstance(x, XComArg):
yield from x.iter_references()
def resolve(self, context: Context, session: Session) -> tuple[Mapping[str, Any], set[int]]:
data = {k: self._expand_mapped_field(k, v, context, session=session) for k, v in self.value.items()}
literal_keys = {k for k, _ in self._iter_parse_time_resolved_kwargs()}
resolved_oids = {id(v) for k, v in data.items() if k not in literal_keys}
return data, resolved_oids
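# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ``_find_index_for_this_field`` above decodes a flat ``map_index`` into one index per expanded
# keyword, with the last-declared keyword varying fastest (a mixed-radix decode). The helper below
# reproduces that arithmetic for plain lengths; the names and values are made up.
def _decode_map_index(lengths: dict, map_index: int) -> dict:
    decoded = {}
    for mapped_key in reversed(list(lengths)):
        decoded[mapped_key] = map_index % lengths[mapped_key]
        map_index //= lengths[mapped_key]
    return decoded


# With expand(x=<2 items>, y=<3 items>) there are six mapped TIs; map_index 4 selects the second
# item of ``x`` and the second item of ``y``.
assert _decode_map_index({"x": 2, "y": 3}, 4) == {"x": 1, "y": 1}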
def _describe_type(value: Any) -> str:
if value is None:
return "None"
return type(value).__name__
class ListOfDictsExpandInput(NamedTuple):
"""Storage type of a mapped operator's mapped kwargs.
This is created from ``expand_kwargs(xcom_arg)``.
"""
value: OperatorExpandKwargsArgument
def get_parse_time_mapped_ti_count(self) -> int:
if isinstance(self.value, collections.abc.Sized):
return len(self.value)
raise NotFullyPopulated({"expand_kwargs() argument"})
def get_total_map_length(self, run_id: str, *, session: Session) -> int:
if isinstance(self.value, collections.abc.Sized):
return len(self.value)
length = self.value.get_task_map_length(run_id, session=session)
if length is None:
raise NotFullyPopulated({"expand_kwargs() argument"})
return length
def iter_references(self) -> Iterable[tuple[Operator, str]]:
from airflow.models.xcom_arg import XComArg
if isinstance(self.value, XComArg):
yield from self.value.iter_references()
else:
for x in self.value:
if isinstance(x, XComArg):
yield from x.iter_references()
def resolve(self, context: Context, session: Session) -> tuple[Mapping[str, Any], set[int]]:
map_index = context["ti"].map_index
if map_index < 0:
raise RuntimeError("can't resolve task-mapping argument without expanding")
mapping: Any
if isinstance(self.value, collections.abc.Sized):
mapping = self.value[map_index]
if not isinstance(mapping, collections.abc.Mapping):
mapping = mapping.resolve(context, session)
else:
mappings = self.value.resolve(context, session)
if not isinstance(mappings, collections.abc.Sequence):
raise ValueError(f"expand_kwargs() expects a list[dict], not {_describe_type(mappings)}")
mapping = mappings[map_index]
if not isinstance(mapping, collections.abc.Mapping):
raise ValueError(f"expand_kwargs() expects a list[dict], not list[{_describe_type(mapping)}]")
for key in mapping:
if not isinstance(key, str):
raise ValueError(
f"expand_kwargs() input dict keys must all be str, "
f"but {key!r} is of type {_describe_type(key)}"
)
return mapping, {id(v) for v in mapping.values()}
EXPAND_INPUT_EMPTY = DictOfListsExpandInput({}) # Sentinel value.
_EXPAND_INPUT_TYPES = {
"dict-of-lists": DictOfListsExpandInput,
"list-of-dicts": ListOfDictsExpandInput,
}
def get_map_type_key(expand_input: ExpandInput) -> str:
return next(k for k, v in _EXPAND_INPUT_TYPES.items() if v == type(expand_input))
def create_expand_input(kind: str, value: Any) -> ExpandInput:
return _EXPAND_INPUT_TYPES[kind](value)
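# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Round trip through the registry above: the serialized "kind" string selects the storage class and
# ``get_map_type_key`` recovers it. The literal kwargs are made up for illustration.
def _example_expand_input_round_trip() -> int:
    example = create_expand_input("dict-of-lists", {"x": [1, 2], "y": ["a", "b", "c"]})
    assert isinstance(example, DictOfListsExpandInput)
    assert get_map_type_key(example) == "dict-of-lists"
    # Two values for ``x`` times three values for ``y`` -> six parse-time mapped task instances.
    return example.get_parse_time_mapped_ti_count()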
| 11,445 | 39.161404 | 108 |
py
|
airflow
|
airflow-main/airflow/models/taskinstancekey.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
class TaskInstanceKey(NamedTuple):
"""Key used to identify task instance."""
dag_id: str
task_id: str
run_id: str
try_number: int = 1
map_index: int = -1
@property
def primary(self) -> tuple[str, str, str, int]:
"""Return task instance primary key part of the key."""
return self.dag_id, self.task_id, self.run_id, self.map_index
@property
def reduced(self) -> TaskInstanceKey:
"""Remake the key by subtracting 1 from try number to match in memory information."""
return TaskInstanceKey(
self.dag_id, self.task_id, self.run_id, max(1, self.try_number - 1), self.map_index
)
def with_try_number(self, try_number: int) -> TaskInstanceKey:
"""Returns TaskInstanceKey with provided ``try_number``."""
return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, try_number, self.map_index)
@property
def key(self) -> TaskInstanceKey:
"""For API-compatibly with TaskInstance.
Returns self
"""
return self
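# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ``TaskInstanceKey`` is a plain NamedTuple, so it can be constructed and inspected directly. The
# identifiers below are made up for illustration.
def _example_task_instance_key() -> TaskInstanceKey:
    key = TaskInstanceKey("example_dag", "extract", "manual__2023-01-01", try_number=2)
    assert key.primary == ("example_dag", "extract", "manual__2023-01-01", -1)
    assert key.reduced.try_number == 1  # in-memory try number is one behind the stored one
    assert key.with_try_number(5).try_number == 5
    assert key.key is key  # API compatibility with TaskInstance
    return key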
| 1,915 | 33.836364 | 98 |
py
|
airflow
|
airflow-main/airflow/config_templates/default_webserver_config.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Default configuration for the Airflow webserver."""
from __future__ import annotations
import os
from airflow.www.fab_security.manager import AUTH_DB
# from airflow.www.fab_security.manager import AUTH_LDAP
# from airflow.www.fab_security.manager import AUTH_OAUTH
# from airflow.www.fab_security.manager import AUTH_OID
# from airflow.www.fab_security.manager import AUTH_REMOTE_USER
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
WTF_CSRF_TIME_LIMIT = None
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# For details on how to set up each of the following authentication methods, see
# http://flask-appbuilder.readthedocs.io/en/latest/security.html#authentication-methods
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
# AUTH_OAUTH : Is for OAuth
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment and set to desired role to enable access without authentication
# AUTH_ROLE_PUBLIC = 'Viewer'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# Recaptcha is automatically enabled when user self registration is active and the keys are provided
# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY
# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY
# Config for Flask-Mail necessary for user self registration
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_USE_TLS = True
# MAIL_USERNAME = '[email protected]'
# MAIL_PASSWORD = 'passwordformail'
# MAIL_DEFAULT_SENDER = '[email protected]'
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using OAuth Auth, uncomment to setup provider(s) info
# Google OAuth example:
# OAUTH_PROVIDERS = [{
# 'name':'google',
# 'token_key':'access_token',
# 'icon':'fa-google',
# 'remote_app': {
# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',
# 'client_kwargs':{
# 'scope': 'email profile'
# },
# 'access_token_url':'https://accounts.google.com/o/oauth2/token',
# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',
# 'request_token_url': None,
# 'client_id': GOOGLE_KEY,
# 'client_secret': GOOGLE_SECRET_KEY,
# }
# }]
# When using LDAP Auth, set up the LDAP server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# When using OpenID Auth, uncomment to setup OpenID providers.
# example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ----------------------------------------------------
# Theme CONFIG
# ----------------------------------------------------
# Flask App Builder comes with a number of predefined themes
# that you can use for Apache Airflow.
# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes
# Please make sure to remove "navbar_color" configuration from airflow.cfg
# in order to fully utilize the theme. (or use that property in conjunction with theme)
# APP_THEME = "bootstrap-theme.css" # default bootstrap
# APP_THEME = "amelia.css"
# APP_THEME = "cerulean.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"
# APP_THEME = "darkly.css"
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "lumen.css"
# APP_THEME = "paper.css"
# APP_THEME = "readable.css"
# APP_THEME = "sandstone.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"
# APP_THEME = "solar.css"
# APP_THEME = "spacelab.css"
# APP_THEME = "superhero.css"
# APP_THEME = "united.css"
# APP_THEME = "yeti.css"
| 4,771 | 35.151515 | 106 |
py
|
airflow
|
airflow-main/airflow/config_templates/airflow_local_settings.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Airflow logging settings."""
from __future__ import annotations
import os
from pathlib import Path
from typing import Any
from urllib.parse import urlsplit
from airflow.configuration import conf
from airflow.exceptions import AirflowException
LOG_LEVEL: str = conf.get_mandatory_value("logging", "LOGGING_LEVEL").upper()
# Flask appbuilder's info level log is very verbose,
# so it's set to 'WARN' by default.
FAB_LOG_LEVEL: str = conf.get_mandatory_value("logging", "FAB_LOGGING_LEVEL").upper()
LOG_FORMAT: str = conf.get_mandatory_value("logging", "LOG_FORMAT")
DAG_PROCESSOR_LOG_FORMAT: str = conf.get_mandatory_value("logging", "DAG_PROCESSOR_LOG_FORMAT")
LOG_FORMATTER_CLASS: str = conf.get_mandatory_value(
"logging", "LOG_FORMATTER_CLASS", fallback="airflow.utils.log.timezone_aware.TimezoneAware"
)
COLORED_LOG_FORMAT: str = conf.get_mandatory_value("logging", "COLORED_LOG_FORMAT")
COLORED_LOG: bool = conf.getboolean("logging", "COLORED_CONSOLE_LOG")
COLORED_FORMATTER_CLASS: str = conf.get_mandatory_value("logging", "COLORED_FORMATTER_CLASS")
DAG_PROCESSOR_LOG_TARGET: str = conf.get_mandatory_value("logging", "DAG_PROCESSOR_LOG_TARGET")
BASE_LOG_FOLDER: str = conf.get_mandatory_value("logging", "BASE_LOG_FOLDER")
PROCESSOR_LOG_FOLDER: str = conf.get_mandatory_value("scheduler", "CHILD_PROCESS_LOG_DIRECTORY")
DAG_PROCESSOR_MANAGER_LOG_LOCATION: str = conf.get_mandatory_value(
"logging", "DAG_PROCESSOR_MANAGER_LOG_LOCATION"
)
# FILENAME_TEMPLATE is only used in remote logging handlers since Airflow 2.3.3.
# All of these handlers inherit from FileTaskHandler, and providing any value other than None
# would raise a deprecation warning.
FILENAME_TEMPLATE: str | None = None
PROCESSOR_FILENAME_TEMPLATE: str = conf.get_mandatory_value("logging", "LOG_PROCESSOR_FILENAME_TEMPLATE")
DEFAULT_LOGGING_CONFIG: dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"airflow": {
"format": LOG_FORMAT,
"class": LOG_FORMATTER_CLASS,
},
"airflow_coloured": {
"format": COLORED_LOG_FORMAT if COLORED_LOG else LOG_FORMAT,
"class": COLORED_FORMATTER_CLASS if COLORED_LOG else LOG_FORMATTER_CLASS,
},
"source_processor": {
"format": DAG_PROCESSOR_LOG_FORMAT,
"class": LOG_FORMATTER_CLASS,
},
},
"filters": {
"mask_secrets": {
"()": "airflow.utils.log.secrets_masker.SecretsMasker",
},
},
"handlers": {
"console": {
"class": "airflow.utils.log.logging_mixin.RedirectStdHandler",
"formatter": "airflow_coloured",
"stream": "sys.stdout",
"filters": ["mask_secrets"],
},
"task": {
"class": "airflow.utils.log.file_task_handler.FileTaskHandler",
"formatter": "airflow",
"base_log_folder": os.path.expanduser(BASE_LOG_FOLDER),
"filters": ["mask_secrets"],
},
"processor": {
"class": "airflow.utils.log.file_processor_handler.FileProcessorHandler",
"formatter": "airflow",
"base_log_folder": os.path.expanduser(PROCESSOR_LOG_FOLDER),
"filename_template": PROCESSOR_FILENAME_TEMPLATE,
"filters": ["mask_secrets"],
},
"processor_to_stdout": {
"class": "airflow.utils.log.logging_mixin.RedirectStdHandler",
"formatter": "source_processor",
"stream": "sys.stdout",
"filters": ["mask_secrets"],
},
},
"loggers": {
"airflow.processor": {
"handlers": ["processor_to_stdout" if DAG_PROCESSOR_LOG_TARGET == "stdout" else "processor"],
"level": LOG_LEVEL,
# Set to true here (and reset via set_context) so that if no file is configured we still get logs!
"propagate": True,
},
"airflow.task": {
"handlers": ["task"],
"level": LOG_LEVEL,
# Set to true here (and reset via set_context) so that if no file is configured we still get logs!
"propagate": True,
"filters": ["mask_secrets"],
},
"flask_appbuilder": {
"handlers": ["console"],
"level": FAB_LOG_LEVEL,
"propagate": True,
},
},
"root": {
"handlers": ["console"],
"level": LOG_LEVEL,
"filters": ["mask_secrets"],
},
}
EXTRA_LOGGER_NAMES: str | None = conf.get("logging", "EXTRA_LOGGER_NAMES", fallback=None)
if EXTRA_LOGGER_NAMES:
new_loggers = {
logger_name.strip(): {
"handlers": ["console"],
"level": LOG_LEVEL,
"propagate": True,
}
for logger_name in EXTRA_LOGGER_NAMES.split(",")
}
DEFAULT_LOGGING_CONFIG["loggers"].update(new_loggers)
DEFAULT_DAG_PARSING_LOGGING_CONFIG: dict[str, dict[str, dict[str, Any]]] = {
"handlers": {
"processor_manager": {
"class": "airflow.utils.log.non_caching_file_handler.NonCachingRotatingFileHandler",
"formatter": "airflow",
"filename": DAG_PROCESSOR_MANAGER_LOG_LOCATION,
"mode": "a",
"maxBytes": 104857600, # 100MB
"backupCount": 5,
}
},
"loggers": {
"airflow.processor_manager": {
"handlers": ["processor_manager"],
"level": LOG_LEVEL,
"propagate": False,
}
},
}
# Only update the handlers and loggers when CONFIG_PROCESSOR_MANAGER_LOGGER is set.
# This is to avoid exceptions when initializing RotatingFileHandler multiple times
# in multiple processes.
if os.environ.get("CONFIG_PROCESSOR_MANAGER_LOGGER") == "True":
DEFAULT_LOGGING_CONFIG["handlers"].update(DEFAULT_DAG_PARSING_LOGGING_CONFIG["handlers"])
DEFAULT_LOGGING_CONFIG["loggers"].update(DEFAULT_DAG_PARSING_LOGGING_CONFIG["loggers"])
# Manually create log directory for processor_manager handler as RotatingFileHandler
# will only create file but not the directory.
processor_manager_handler_config: dict[str, Any] = DEFAULT_DAG_PARSING_LOGGING_CONFIG["handlers"][
"processor_manager"
]
directory: str = os.path.dirname(processor_manager_handler_config["filename"])
Path(directory).mkdir(parents=True, exist_ok=True, mode=0o755)
##################
# Remote logging #
##################
REMOTE_LOGGING: bool = conf.getboolean("logging", "remote_logging")
if REMOTE_LOGGING:
ELASTICSEARCH_HOST: str | None = conf.get("elasticsearch", "HOST")
# Storage bucket URL for remote logging
# S3 buckets should start with "s3://"
# Cloudwatch log groups should start with "cloudwatch://"
# GCS buckets should start with "gs://"
# WASB buckets should start with "wasb"
# HDFS path should start with "hdfs://"
# just to help Airflow select the correct handler
REMOTE_BASE_LOG_FOLDER: str = conf.get_mandatory_value("logging", "REMOTE_BASE_LOG_FOLDER")
REMOTE_TASK_HANDLER_KWARGS = conf.getjson("logging", "REMOTE_TASK_HANDLER_KWARGS", fallback={})
if REMOTE_BASE_LOG_FOLDER.startswith("s3://"):
S3_REMOTE_HANDLERS: dict[str, dict[str, str | None]] = {
"task": {
"class": "airflow.providers.amazon.aws.log.s3_task_handler.S3TaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"s3_log_folder": REMOTE_BASE_LOG_FOLDER,
"filename_template": FILENAME_TEMPLATE,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(S3_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("cloudwatch://"):
url_parts = urlsplit(REMOTE_BASE_LOG_FOLDER)
CLOUDWATCH_REMOTE_HANDLERS: dict[str, dict[str, str | None]] = {
"task": {
"class": "airflow.providers.amazon.aws.log.cloudwatch_task_handler.CloudwatchTaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"log_group_arn": url_parts.netloc + url_parts.path,
"filename_template": FILENAME_TEMPLATE,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(CLOUDWATCH_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("gs://"):
key_path = conf.get_mandatory_value("logging", "GOOGLE_KEY_PATH", fallback=None)
GCS_REMOTE_HANDLERS: dict[str, dict[str, str | None]] = {
"task": {
"class": "airflow.providers.google.cloud.log.gcs_task_handler.GCSTaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"gcs_log_folder": REMOTE_BASE_LOG_FOLDER,
"filename_template": FILENAME_TEMPLATE,
"gcp_key_path": key_path,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(GCS_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("wasb"):
WASB_REMOTE_HANDLERS: dict[str, dict[str, str | bool | None]] = {
"task": {
"class": "airflow.providers.microsoft.azure.log.wasb_task_handler.WasbTaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"wasb_log_folder": REMOTE_BASE_LOG_FOLDER,
"wasb_container": "airflow-logs",
"filename_template": FILENAME_TEMPLATE,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(WASB_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("stackdriver://"):
key_path = conf.get_mandatory_value("logging", "GOOGLE_KEY_PATH", fallback=None)
# stackdriver:///airflow-tasks => airflow-tasks
log_name = urlsplit(REMOTE_BASE_LOG_FOLDER).path[1:]
STACKDRIVER_REMOTE_HANDLERS = {
"task": {
"class": "airflow.providers.google.cloud.log.stackdriver_task_handler.StackdriverTaskHandler",
"formatter": "airflow",
"name": log_name,
"gcp_key_path": key_path,
}
}
DEFAULT_LOGGING_CONFIG["handlers"].update(STACKDRIVER_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("oss://"):
OSS_REMOTE_HANDLERS = {
"task": {
"class": "airflow.providers.alibaba.cloud.log.oss_task_handler.OSSTaskHandler",
"formatter": "airflow",
"base_log_folder": os.path.expanduser(BASE_LOG_FOLDER),
"oss_log_folder": REMOTE_BASE_LOG_FOLDER,
"filename_template": FILENAME_TEMPLATE,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(OSS_REMOTE_HANDLERS)
elif REMOTE_BASE_LOG_FOLDER.startswith("hdfs://"):
HDFS_REMOTE_HANDLERS: dict[str, dict[str, str | None]] = {
"task": {
"class": "airflow.providers.apache.hdfs.log.hdfs_task_handler.HdfsTaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"hdfs_log_folder": REMOTE_BASE_LOG_FOLDER,
"filename_template": FILENAME_TEMPLATE,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(HDFS_REMOTE_HANDLERS)
elif ELASTICSEARCH_HOST:
ELASTICSEARCH_END_OF_LOG_MARK: str = conf.get_mandatory_value("elasticsearch", "END_OF_LOG_MARK")
ELASTICSEARCH_FRONTEND: str = conf.get_mandatory_value("elasticsearch", "frontend")
ELASTICSEARCH_WRITE_STDOUT: bool = conf.getboolean("elasticsearch", "WRITE_STDOUT")
ELASTICSEARCH_JSON_FORMAT: bool = conf.getboolean("elasticsearch", "JSON_FORMAT")
ELASTICSEARCH_JSON_FIELDS: str = conf.get_mandatory_value("elasticsearch", "JSON_FIELDS")
ELASTICSEARCH_HOST_FIELD: str = conf.get_mandatory_value("elasticsearch", "HOST_FIELD")
ELASTICSEARCH_OFFSET_FIELD: str = conf.get_mandatory_value("elasticsearch", "OFFSET_FIELD")
ELASTIC_REMOTE_HANDLERS: dict[str, dict[str, str | bool | None]] = {
"task": {
"class": "airflow.providers.elasticsearch.log.es_task_handler.ElasticsearchTaskHandler",
"formatter": "airflow",
"base_log_folder": str(os.path.expanduser(BASE_LOG_FOLDER)),
"filename_template": FILENAME_TEMPLATE,
"end_of_log_mark": ELASTICSEARCH_END_OF_LOG_MARK,
"host": ELASTICSEARCH_HOST,
"frontend": ELASTICSEARCH_FRONTEND,
"write_stdout": ELASTICSEARCH_WRITE_STDOUT,
"json_format": ELASTICSEARCH_JSON_FORMAT,
"json_fields": ELASTICSEARCH_JSON_FIELDS,
"host_field": ELASTICSEARCH_HOST_FIELD,
"offset_field": ELASTICSEARCH_OFFSET_FIELD,
},
}
DEFAULT_LOGGING_CONFIG["handlers"].update(ELASTIC_REMOTE_HANDLERS)
else:
raise AirflowException(
"Incorrect remote log configuration. Please check the configuration of option 'host' in "
"section 'elasticsearch' if you are using Elasticsearch. In the other case, "
"'remote_base_log_folder' option in the 'logging' section."
)
DEFAULT_LOGGING_CONFIG["handlers"]["task"].update(REMOTE_TASK_HANDLER_KWARGS)
| 14,246 | 42.042296 | 110 |
py
|
airflow
|
airflow-main/airflow/config_templates/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.utils.deprecation_tools import add_deprecated_classes
__deprecated_classes = {
"default_celery": {
"DEFAULT_CELERY_CONFIG": "airflow.providers.celery.executors.default_celery.DEFAULT_CELERY_CONFIG",
},
}
add_deprecated_classes(__deprecated_classes, __name__)
| 1,113 | 37.413793 | 107 |
py
|
airflow
|
airflow-main/airflow/_vendor/__init__.py
| 0 | 0 | 0 |
py
|
|
airflow
|
airflow-main/airflow/dag_processing/processor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import importlib
import logging
import multiprocessing
import os
import signal
import threading
import time
import zipfile
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import datetime, timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Iterable, Iterator
from setproctitle import setproctitle
from sqlalchemy import delete, exc, func, or_
from sqlalchemy.orm.session import Session
from airflow import settings
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.callbacks.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import SlaMiss, errors
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun as DR
from airflow.models.dagwarning import DagWarning, DagWarningType
from airflow.models.taskinstance import TaskInstance as TI
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.file import iter_airflow_imports, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import TaskInstanceState
if TYPE_CHECKING:
from airflow.models.operator import Operator
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Runs DAG processing in a separate process using DagFileProcessor.
:param file_path: a Python file containing Airflow DAG definitions
:param pickle_dags: whether to serialize the DAG objects to the DB
:param dag_ids: If specified, only look at these DAG IDs
:param callback_requests: failure callback to execute
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: list[str] | None,
dag_directory: str,
callback_requests: list[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._dag_directory = dag_directory
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: multiprocessing.process.BaseProcess | None = None
# The result of DagFileProcessor.process_file(file_path).
self._result: tuple[int, int] | None = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: datetime | None = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: MultiprocessingConnection | None = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: list[str] | None,
thread_name: str,
dag_directory: str,
callback_requests: list[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:param parent_channel: the parent end of the channel to close in the child
:param file_path: the file to process
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:param dag_ids: if specified, only examine DAG IDs that are
in this list
:param thread_name: the name to use for the process that is launched
:param callback_requests: failure callback to execute
:return: the process that was launched
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
def _handle_dag_file_processing():
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, dag_directory=dag_directory, log=log)
result: tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
try:
DAG_PROCESSOR_LOG_TARGET = conf.get_mandatory_value("logging", "DAG_PROCESSOR_LOG_TARGET")
if DAG_PROCESSOR_LOG_TARGET == "stdout":
with Stats.timer() as timer:
_handle_dag_file_processing()
else:
# The following line ensures that stdout goes to the same destination as the logs. If stdout
# gets sent to logs and logs are sent to stdout, this leads to an infinite loop. This
# necessitates this conditional based on the value of DAG_PROCESSOR_LOG_TARGET.
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
_handle_dag_file_processing()
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
if conf.getboolean("scheduler", "parsing_pre_import_modules", fallback=True):
# Read the file to pre-import airflow modules used.
# This prevents them from being re-imported from zero in each "processing" process
# and saves CPU time and memory.
zip_file_paths = []
if zipfile.is_zipfile(self.file_path):
try:
with zipfile.ZipFile(self.file_path) as z:
zip_file_paths.extend(
[
os.path.join(self.file_path, info.filename)
for info in z.infolist()
if might_contain_dag(info.filename, True, z)
]
)
except zipfile.BadZipFile as err:
self.log.error("There was an err accessing %s, %s", self.file_path, err)
if zip_file_paths:
self.import_modules(zip_file_paths)
else:
self.import_modules(self.file_path)
context = self._get_multiprocessing_context()
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._dag_directory,
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now that the subprocess has started -- otherwise this would
# prevent it from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
# Reap the spawned zombie. We actively wait, because in Python 3.9 `waitpid` might lead to an
# exception, due to a change in the Python standard library and the possibility of a race condition;
# see https://bugs.python.org/issue42558
while self._process._popen.poll() is None: # type: ignore
time.sleep(0.001)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""PID of the process launched to process the given file."""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> int | None:
"""
After the process is finished, this can be called to get the return code.
:return: the exit code of the process
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> tuple[int, int] | None:
"""Result of running ``DagFileProcessor.process_file()``."""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime:
"""Time when this started to process the file."""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
def import_modules(self, file_path: str | Iterable[str]):
def _import_modules(filepath):
for module in iter_airflow_imports(filepath):
try:
importlib.import_module(module)
except Exception as e:
                    # Only log as a warning because an error here does not prevent anything from working;
                    # if it's serious, it will be surfaced to the user when the dag is actually parsed.
self.log.warning(
"Error when trying to pre-import module '%s' found in %s: %s",
module,
                        filepath,
e,
)
if isinstance(file_path, str):
_import_modules(file_path)
elif isinstance(file_path, Iterable):
for path in file_path:
_import_modules(path)
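# --- Hedged illustration (not part of the original Airflow module) -----------
# ``DagFileProcessorProcess.import_modules`` above pre-imports top-level
# ``airflow`` imports found in a DAG file so that forked child processors start
# with those modules already cached in ``sys.modules``. The sketch below shows
# the same idea using only the standard library; ``_toplevel_airflow_imports``
# is a simplified, hypothetical stand-in for ``iter_airflow_imports`` and is
# not the helper Airflow itself uses.
def _example_preimport_airflow_modules(dag_file: str) -> None:
    """Best-effort pre-import of ``airflow`` modules referenced by a DAG file."""
    import ast
    import importlib

    def _toplevel_airflow_imports(path: str):
        with open(path, encoding="utf-8") as f:
            tree = ast.parse(f.read(), filename=path)
        for node in tree.body:  # only module-level statements
            if isinstance(node, ast.Import):
                for alias in node.names:
                    if alias.name.split(".")[0] == "airflow":
                        yield alias.name
            elif isinstance(node, ast.ImportFrom) and node.level == 0 and node.module:
                if node.module.split(".")[0] == "airflow":
                    yield node.module

    for module in _toplevel_airflow_imports(dag_file):
        try:
            importlib.import_module(module)
        except Exception:
            # Failures here are non-fatal; real parsing will surface them later.
            pass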
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
    3. Serialize the DAGs and save them to the DB (or update the existing records in the DB).
    4. Pickle the DAGs and save them to the DB (if necessary).
    5. Record any errors importing the file into the ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
    :param dag_ids: If specified, only look at these DAG IDs
    :param dag_directory: Directory where the DAG definitions are kept
    :param log: Logger used while processing the file
"""
UNIT_TEST_MODE: bool = conf.getboolean("core", "UNIT_TEST_MODE")
def __init__(self, dag_ids: list[str] | None, dag_directory: str, log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
self._dag_directory = dag_directory
self.dag_warnings: set[tuple[str, str]] = set()
@classmethod
@internal_api_call
@provide_session
def manage_slas(cls, dag_folder, dag_id: str, session: Session = NEW_SESSION) -> None:
"""
        Find all tasks that have SLAs defined and send alert emails when needed.
New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
dagbag = DagFileProcessor._get_dagbag(dag_folder)
dag = dagbag.get_dag(dag_id)
cls.logger().info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
cls.logger().info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(DR.execution_date).label("max_ti"))
.join(TI.dag_run)
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == TaskInstanceState.SUCCESS, TI.state == TaskInstanceState.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery("sq")
)
# get recorded SlaMiss
recorded_slas_query = set(
session.query(SlaMiss.dag_id, SlaMiss.task_id, SlaMiss.execution_date).filter(
SlaMiss.dag_id == dag.dag_id, SlaMiss.task_id.in_(dag.task_ids)
)
)
max_tis: Iterator[TI] = (
session.query(TI)
.join(TI.dag_run)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
DR.execution_date == qry.c.max_ti,
)
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not task.sla:
continue
if not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
sla_misses = []
next_info = dag.next_dagrun_info(dag.get_run_data_interval(ti.dag_run), restricted=False)
while next_info and next_info.logical_date < ts:
next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
if next_info is None:
break
if (ti.dag_id, ti.task_id, next_info.logical_date) in recorded_slas_query:
continue
if next_info.logical_date + task.sla < ts:
sla_miss = SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=next_info.logical_date,
timestamp=ts,
)
sla_misses.append(sla_miss)
Stats.incr("sla_missed", tags={"dag_id": ti.dag_id, "task_id": ti.task_id})
if sla_misses:
session.add_all(sla_misses)
session.commit()
slas: list[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
if slas:
sla_dates: list[datetime] = [sla.execution_date for sla in slas]
fetched_tis: list[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(sla_dates),
TI.state != TaskInstanceState.SUCCESS,
)
.all()
)
blocking_tis: list[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + " on " + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + " on " + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
callbacks = (
dag.sla_miss_callback
if isinstance(dag.sla_miss_callback, list)
else [dag.sla_miss_callback]
)
for callback in callbacks:
cls.logger().info("Calling SLA miss callback %s", callback)
try:
callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception:
Stats.incr(
"sla_callback_notification_failure",
tags={
"dag_id": dag.dag_id,
"func_name": callback.__name__,
},
)
cls.logger().exception(
"Could not call sla_miss_callback(%s) for DAG %s",
callback.__name__,
dag.dag_id,
)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
Blocking tasks:
            <pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
cls.logger().warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception:
Stats.incr("sla_email_notification_failure", tags={"dag_id": dag.dag_id})
cls.logger().exception(
"Could not send SLA Miss email notification for DAG %s", dag.dag_id
)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
@internal_api_call
@provide_session
def update_import_errors(
file_last_changed: dict[str, datetime], import_errors: dict[str, str], session: Session = NEW_SESSION
) -> None:
"""
Update any import errors to be displayed in the UI.
        For the given files, record any associated import errors and clear
        errors for files that no longer have them. These are usually displayed through the
        Airflow UI so that users know that there are issues parsing DAGs.
        :param file_last_changed: Dictionary mapping file paths to the time they last changed
        :param import_errors: Dictionary mapping file paths to the import errors raised while parsing
        :param session: session for ORM operations
"""
files_without_error = file_last_changed - import_errors.keys()
# Clear the errors of the processed files
# that no longer have errors
for dagbag_file in files_without_error:
session.execute(
delete(errors.ImportError)
.where(errors.ImportError.filename.startswith(dagbag_file))
.execution_options(synchronize_session="fetch")
)
# files that still have errors
existing_import_error_files = [x.filename for x in session.query(errors.ImportError.filename).all()]
# Add the errors of the processed files
for filename, stacktrace in import_errors.items():
if filename in existing_import_error_files:
session.query(errors.ImportError).filter(errors.ImportError.filename == filename).update(
dict(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace),
synchronize_session="fetch",
)
else:
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
(
session.query(DagModel)
.filter(DagModel.fileloc == filename)
.update({"has_import_errors": True}, synchronize_session="fetch")
)
session.commit()
@provide_session
def _validate_task_pools(self, *, dagbag: DagBag, session: Session = NEW_SESSION):
"""Validates and raise exception if any task in a dag is using a non-existent pool."""
from airflow.models.pool import Pool
def check_pools(dag):
task_pools = {task.pool for task in dag.tasks}
nonexistent_pools = task_pools - pools
if nonexistent_pools:
return (
f"Dag '{dag.dag_id}' references non-existent pools: {list(sorted(nonexistent_pools))!r}"
)
pools = {p.pool for p in Pool.get_pools(session)}
for dag in dagbag.dags.values():
message = check_pools(dag)
if message:
self.dag_warnings.add(DagWarning(dag.dag_id, DagWarningType.NONEXISTENT_POOL, message))
for subdag in dag.subdags:
message = check_pools(subdag)
if message:
self.dag_warnings.add(DagWarning(subdag.dag_id, DagWarningType.NONEXISTENT_POOL, message))
def update_dag_warnings(self, *, session: Session, dagbag: DagBag) -> None:
"""
Update any import warnings to be displayed in the UI.
For the DAGs in the given DagBag, record any associated configuration warnings and clear
warnings for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:param dagbag: DagBag containing DAGs with configuration warnings
"""
self._validate_task_pools(dagbag=dagbag)
stored_warnings = set(
session.query(DagWarning).filter(DagWarning.dag_id.in_(dagbag.dags.keys())).all()
)
for warning_to_delete in stored_warnings - self.dag_warnings:
session.delete(warning_to_delete)
for warning_to_add in self.dag_warnings:
session.merge(warning_to_add)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: list[CallbackRequest], session: Session = NEW_SESSION
) -> None:
"""
        Execute the given callback requests (task, SLA and DAG callbacks).
These objects can come from SchedulerJobRunner or from DagProcessorJobRunner.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request, session=session)
elif isinstance(request, SlaCallbackRequest):
DagFileProcessor.manage_slas(dagbag.dag_folder, request.dag_id, session=session)
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception:
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.flush()
def execute_callbacks_without_dag(
self, callback_requests: list[CallbackRequest], session: Session
) -> None:
"""
        Execute what callbacks we can as "best effort" when the dag cannot be found or had parse errors.
        This is important so that tasks that failed when there was a parse
        error don't get stuck in a queued state.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(None, request, session)
else:
self.log.info(
"Not executing %s callback for file %s as there was a dag parse error",
request.__class__.__name__,
request.full_filepath,
)
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(run_id=request.run_id, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag | None, request: TaskCallbackRequest, session: Session):
if not request.is_failure_callback:
return
simple_ti = request.simple_task_instance
ti: TI | None = (
session.query(TI)
.filter_by(
dag_id=simple_ti.dag_id,
run_id=simple_ti.run_id,
task_id=simple_ti.task_id,
map_index=simple_ti.map_index,
)
.one_or_none()
)
if not ti:
return
task: Operator | None = None
if dagbag and simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
else:
# We don't have the _real_ dag here (perhaps it had a parse error?) but we still want to run
# `handle_failure` so that the state of the TI gets progressed.
#
# Since handle_failure _really_ wants a task, we do our best effort to give it one
from airflow.models.serialized_dag import SerializedDagModel
try:
model = session.get(SerializedDagModel, simple_ti.dag_id)
if model:
task = model.dag.get_task(simple_ti.task_id)
except (exc.NoResultFound, TaskNotFound):
pass
if task:
ti.refresh_from_task(task)
ti.handle_failure(error=request.msg, test_mode=self.UNIT_TEST_MODE, session=session)
self.log.info("Executed failure callback for %s in state %s", ti, ti.state)
session.flush()
@classmethod
def _get_dagbag(cls, file_path: str):
try:
return DagBag(file_path, include_examples=False)
except Exception:
cls.logger().exception("Failed at reloading the DAG file %s", file_path)
Stats.incr("dag_file_refresh_error", tags={"file_path": file_path})
raise
@provide_session
def process_file(
self,
file_path: str,
callback_requests: list[CallbackRequest],
pickle_dags: bool = False,
session: Session = NEW_SESSION,
) -> tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
        3. Serialize the DAGs and save them to the DB (or update the existing records in the DB).
        4. Pickle the DAGs and save them to the DB (if necessary).
        5. Mark any DAGs which are no longer present as inactive
        6. Record any errors importing the file into the ORM
:param file_path: the path to the Python file that should be executed
:param callback_requests: failure callback to execute
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
:param session: Sqlalchemy ORM Session
:return: number of dags found, count of import errors
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagFileProcessor._get_dagbag(file_path)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr("dag_file_refresh_error", 1, 1, tags={"file_path": file_path})
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
DagFileProcessor.update_import_errors(
file_last_changed=dagbag.file_last_changed,
import_errors=dagbag.import_errors,
session=session,
)
if callback_requests:
# If there were callback requests for this file but there was a
# parse error we still need to progress the state of TIs,
# otherwise they might be stuck in queued/running for ever!
self.execute_callbacks_without_dag(callback_requests, session)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests, session)
session.commit()
serialize_errors = DagFileProcessor.save_dag_to_db(
dags=dagbag.dags,
dag_directory=self._dag_directory,
pickle_dags=pickle_dags,
)
dagbag.import_errors.update(dict(serialize_errors))
# Record import errors into the ORM
try:
DagFileProcessor.update_import_errors(
file_last_changed=dagbag.file_last_changed,
import_errors=dagbag.import_errors,
session=session,
)
except Exception:
self.log.exception("Error logging import errors!")
# Record DAG warnings in the metadatabase.
try:
self.update_dag_warnings(session=session, dagbag=dagbag)
except Exception:
self.log.exception("Error logging DAG warnings.")
return len(dagbag.dags), len(dagbag.import_errors)
@staticmethod
@internal_api_call
@provide_session
def save_dag_to_db(
dags: dict[str, DAG],
dag_directory: str,
pickle_dags: bool = False,
session=NEW_SESSION,
):
import_errors = DagBag._sync_to_db(dags=dags, processor_subdir=dag_directory, session=session)
session.commit()
dag_ids = list(dags)
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dag_ids)
unpaused_dags: list[DAG] = [dag for dag_id, dag in dags.items() if dag_id not in paused_dag_ids]
for dag in unpaused_dags:
dag.pickle(session)
return import_errors
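# --- Hedged illustration (not part of the original Airflow module) -----------
# A minimal sketch of how ``DagFileProcessor.process_file`` can be driven for a
# single DAG file. In Airflow itself this is wrapped by
# ``DagFileProcessorProcess`` and the manager loop rather than called directly;
# the function below is illustration only, and the dag_folder / logger names
# are arbitrary examples.
def _example_process_single_file(dag_file: str, dag_folder: str) -> tuple[int, int]:
    import logging

    processor = DagFileProcessor(
        dag_ids=None,  # parse every DAG found in the file
        dag_directory=dag_folder,  # recorded as processor_subdir when serializing
        log=logging.getLogger("airflow.processor"),
    )
    # No callbacks to execute and no pickling in this sketch; returns
    # (number of dags found, count of import errors).
    return processor.process_file(dag_file, callback_requests=[], pickle_dags=False)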
| 37,265 | 39.906696 | 110 |
py
|
airflow
|
airflow-main/airflow/dag_processing/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/dag_processing/manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
from __future__ import annotations
import collections
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from pathlib import Path
from typing import Any, Callable, Iterator, NamedTuple, cast
from setproctitle import setproctitle
from sqlalchemy.orm import Session
from tabulate import tabulate
import airflow.models
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.callbacks.callback_requests import CallbackRequest, SlaCallbackRequest
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import errors
from airflow.models.dag import DagModel
from airflow.models.dagwarning import DagWarning
from airflow.models.db_callback_request import DbCallbackRequest
from airflow.models.serialized_dag import SerializedDagModel
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import (
kill_child_processes_by_pids,
reap_process_group,
set_new_process_group,
)
from airflow.utils.retries import retry_db_transaction
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
class DagParsingStat(NamedTuple):
"""Information on processing progress."""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file."""
num_dags: int
import_errors: int
last_finish_time: datetime | None
last_duration: timedelta | None
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = "agent_run_once"
TERMINATE_MANAGER = "terminate_manager"
END_MANAGER = "end_manager"
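# --- Hedged illustration (not part of the original Airflow module) -----------
# ``DagFileStat`` records the outcome of one parse of one file, while
# ``DagParsingStat`` is the small summary the manager sends back to the agent
# over the signal pipe. A minimal sketch of how the two relate, loosely
# mirroring the bookkeeping in ``DagFileProcessorManager._run_parsing_loop``:
def _example_build_parsing_stat(file_stats: dict[str, DagFileStat], max_runs: int) -> DagParsingStat:
    all_files_processed = all(stat.last_finish_time is not None for stat in file_stats.values())
    max_runs_reached = max_runs != -1 and all(stat.run_count >= max_runs for stat in file_stats.values())
    return DagParsingStat(done=max_runs_reached, all_files_processed=all_files_processed)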
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing.
    It is responsible for all DAG parsing related jobs in the scheduler process.
    Mainly it can spin up DagFileProcessorManager in a subprocess,
    collect DAG parsing results from it and communicate signals/DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: Whether to start agent in async mode
"""
def __init__(
self,
dag_directory: os.PathLike,
max_runs: int,
processor_timeout: timedelta,
dag_ids: list[str] | None,
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._dag_directory: os.PathLike = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: multiprocessing.process.BaseProcess | None = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: MultiprocessingConnection | None = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
context = self._get_multiprocessing_context()
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one processing "loop".
Call wait_until_finished to ensure that any launched processors have finished before continuing.
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If this died because of an error then it will be noticed and restarted
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
def get_callbacks_pipe(self) -> MultiprocessingConnection:
"""Returns the pipe for sending Callbacks to DagProcessorManager."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
return self._parent_signal_conn
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: os.PathLike,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: list[str] | None,
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
set_new_process_group()
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ["CONFIG_PROCESSOR_MANAGER_LOGGER"] = "True"
os.environ["AIRFLOW__LOGGING__COLORED_CONSOLE_LOG"] = "False"
# Replicating the behavior of how logging module was loaded
# in logging_config.py
# TODO: This reloading should be removed when we fix our logging behaviour
# In case of "spawn" method of starting processes for multiprocessing, reinitializing of the
# SQLAlchemy engine causes extremely unexpected behaviour of messing with objects already loaded
# in a parent process (likely via resources shared in memory by the ORM libraries).
# This caused flaky tests in our CI for many months and has been discovered while
# iterating on https://github.com/apache/airflow/pull/19860
# The issue that describes the problem and possible remediation is
# at https://github.com/apache/airflow/issues/19934
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit(".", 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ["CONFIG_PROCESSOR_MANAGER_LOGGER"]
processor_manager = DagFileProcessorManager(
dag_directory=dag_directory,
max_runs=max_runs,
processor_timeout=processor_timeout,
dag_ids=dag_ids,
pickle_dags=pickle_dags,
signal_conn=signal_conn,
async_mode=async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr("dag_processing.manager_stalls")
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Whether the DagFileProcessorManager finished."""
return self._done
@property
def all_files_processed(self):
"""Whether all files been processed at least once."""
return self._all_files_processed
def terminate(self):
"""Send termination signal to DAG parsing processor manager to terminate all DAG file processors."""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""Terminate (and then kill) the manager process launched."""
if not self._process:
self.log.warning("Ending without manager process.")
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
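# --- Hedged illustration (not part of the original Airflow module) -----------
# A minimal sketch of the agent lifecycle as a scheduler-like caller might
# drive it in sync mode. The directory, timeout and run count below are
# arbitrary example values, not Airflow defaults.
def _example_run_agent_once(dags_folder: str) -> None:
    agent = DagFileProcessorAgent(
        dag_directory=Path(dags_folder),
        max_runs=1,  # parse each file once, then report done
        processor_timeout=timedelta(minutes=3),  # kill file processors stuck longer than this
        dag_ids=None,
        pickle_dags=False,
        async_mode=False,  # sync mode: the caller drives each parsing loop explicitly
    )
    agent.start()
    try:
        while not agent.done:
            agent.run_single_parsing_loop()
            agent.wait_until_finished()  # only valid in sync mode
            agent.heartbeat()  # harvest DagParsingStat messages, restart the manager if it died
    finally:
        agent.terminate()
        agent.end()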
class DagFileProcessorManager(LoggingMixin):
"""
Manage processes responsible for parsing DAGs.
Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and put the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param signal_conn: connection to communicate signal with processor agent.
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: whether to start the manager in async mode
"""
DEFAULT_FILE_STAT = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
def __init__(
self,
dag_directory: os.PathLike[str],
max_runs: int,
processor_timeout: timedelta,
dag_ids: list[str] | None,
pickle_dags: bool,
signal_conn: MultiprocessingConnection | None = None,
async_mode: bool = True,
):
super().__init__()
# known files; this will be updated every `dag_dir_list_interval` and stuff added/removed accordingly
self._file_paths: list[str] = []
self._file_path_queue: collections.deque[str] = collections.deque()
self._max_runs = max_runs
# signal_conn is None for dag_processor_standalone mode.
self._direct_scheduler_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: float | None = None
self._dag_directory = dag_directory
        # Set the signal conn to non-blocking mode, so that attempting to
        # send when the buffer is full errors, rather than hanging forever
        # attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode and self._direct_scheduler_conn is not None:
os.set_blocking(self._direct_scheduler_conn.fileno(), False)
self.standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
self._parallelism = conf.getint("scheduler", "parsing_processes")
if (
conf.get_mandatory_value("database", "sql_alchemy_conn").startswith("sqlite")
and self._parallelism > 1
):
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint("scheduler", "min_file_process_interval")
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint("scheduler", "print_stats_interval")
# Map from file path to the processor
self._processors: dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: dict[str, DagFileStat] = {}
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# Last time we cleaned up DAGs which are no longer in files
self.last_deactivate_stale_dags_time = timezone.make_aware(datetime.fromtimestamp(0))
# How often to check for DAGs which are no longer in files
self.parsing_cleanup_interval = conf.getint("scheduler", "parsing_cleanup_interval")
# How long to wait for a DAG to be reparsed after its file has been parsed before disabling
self.stale_dag_threshold = conf.getint("scheduler", "stale_dag_threshold")
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint("scheduler", "dag_dir_list_interval")
# Mapping file name and callbacks requests
self._callback_to_execute: dict[str, list[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger("airflow.processor_manager")
self.waitables: dict[Any, MultiprocessingConnection | DagFileProcessorProcess] = (
{
self._direct_scheduler_conn: self._direct_scheduler_conn,
}
if self._direct_scheduler_conn is not None
else {}
)
self.heartbeat: Callable[[], None] = lambda: None
def register_exit_signals(self):
"""Register signals that stop child processes."""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", "\n".join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the DAGs in parallel.
By processing them in separate processes, we can get parallelism and isolation
from potentially harmful user code.
"""
self.register_exit_signals()
set_new_process_group()
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _scan_stale_dags(self):
"""Scan at fix internal DAGs which are no longer present in files."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_deactivate_stale_dags_time).total_seconds()
if elapsed_time_since_refresh > self.parsing_cleanup_interval:
last_parsed = {
fp: self.get_last_finish_time(fp) for fp in self.file_paths if self.get_last_finish_time(fp)
}
DagFileProcessorManager.deactivate_stale_dags(
last_parsed=last_parsed,
dag_directory=self.get_dag_directory(),
stale_dag_threshold=self.stale_dag_threshold,
)
self.last_deactivate_stale_dags_time = timezone.utcnow()
@classmethod
@internal_api_call
@provide_session
def deactivate_stale_dags(
cls,
last_parsed: dict[str, datetime | None],
dag_directory: str,
stale_dag_threshold: int,
session: Session = NEW_SESSION,
):
"""
        Detect DAGs which are no longer present in their files.
        Deactivate them and remove them from the serialized_dag table.
"""
to_deactivate = set()
query = session.query(DagModel.dag_id, DagModel.fileloc, DagModel.last_parsed_time).filter(
DagModel.is_active
)
standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
if standalone_dag_processor:
query = query.filter(DagModel.processor_subdir == dag_directory)
dags_parsed = query.all()
for dag in dags_parsed:
# The largest valid difference between a DagFileStat's last_finished_time and a DAG's
# last_parsed_time is the processor_timeout. Longer than that indicates that the DAG is
# no longer present in the file. We have a stale_dag_threshold configured to prevent a
# significant delay in deactivation of stale dags when a large timeout is configured
if (
dag.fileloc in last_parsed
and (dag.last_parsed_time + timedelta(seconds=stale_dag_threshold)) < last_parsed[dag.fileloc]
):
cls.logger().info("DAG %s is missing and will be deactivated.", dag.dag_id)
to_deactivate.add(dag.dag_id)
if to_deactivate:
deactivated = (
session.query(DagModel)
.filter(DagModel.dag_id.in_(to_deactivate))
.update({DagModel.is_active: False}, synchronize_session="fetch")
)
if deactivated:
cls.logger().info("Deactivated %i DAGs which are no longer present in file.", deactivated)
for dag_id in to_deactivate:
SerializedDagModel.remove_dag(dag_id)
cls.logger().info("Deleted DAG %s in serialized_dag table", dag_id)
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
max_callbacks_per_loop = conf.getint("scheduler", "max_callbacks_per_loop")
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
self.heartbeat()
if self._direct_scheduler_conn is not None and self._direct_scheduler_conn in ready:
agent_signal = self._direct_scheduler_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
if self.standalone_dag_processor:
self._fetch_callbacks(max_callbacks_per_loop)
self._scan_stale_dags()
DagWarning.purge_inactive_dag_warnings()
refreshed_dag_dir = self._refresh_dag_dir()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files already. Note for this
# to clear down, we must have cleared all files found from scanning the dags dir _and_ have
# cleared all files added as a result of callbacks
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
# if new files found in dag dir, add them
elif refreshed_dag_dir:
self.add_new_file_path_to_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
if self._direct_scheduler_conn:
self._direct_scheduler_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
                # It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
@provide_session
def _fetch_callbacks(self, max_callbacks: int, session: Session = NEW_SESSION):
self._fetch_callbacks_with_retries(max_callbacks, session)
@retry_db_transaction
def _fetch_callbacks_with_retries(self, max_callbacks: int, session: Session):
"""Fetches callbacks from database and add them to the internal queue for execution."""
self.log.debug("Fetching callbacks from the database.")
with prohibit_commit(session) as guard:
query = session.query(DbCallbackRequest)
if self.standalone_dag_processor:
query = query.filter(
DbCallbackRequest.processor_subdir == self.get_dag_directory(),
)
query = query.order_by(DbCallbackRequest.priority_weight.asc()).limit(max_callbacks)
callbacks = with_row_locks(
query, of=DbCallbackRequest, session=session, **skip_locked(session=session)
).all()
for callback in callbacks:
try:
self._add_callback_to_queue(callback.get_callback_request())
session.delete(callback)
except Exception as e:
self.log.warning("Error adding callback for execution: %s, %s", callback, e)
guard.commit()
def _add_callback_to_queue(self, request: CallbackRequest):
# requests are sent by dag processors. SLAs exist per-dag, but can be generated once per SLA-enabled
# task in the dag. If treated like other callbacks, SLAs can cause feedback where a SLA arrives,
# goes to the front of the queue, gets processed, triggers more SLAs from the same DAG, which go to
# the front of the queue, and we never get round to picking stuff off the back of the queue
if isinstance(request, SlaCallbackRequest):
if request in self._callback_to_execute[request.full_filepath]:
self.log.debug("Skipping already queued SlaCallbackRequest")
return
# not already queued, queue the callback
# do NOT add the file of this SLA to self._file_path_queue. SLAs can arrive so rapidly that
# they keep adding to the file queue and never letting it drain. This in turn prevents us from
# ever rescanning the dags folder for changes to existing dags. We simply store the callback, and
# periodically, when self._file_path_queue is drained, we rescan and re-queue all DAG files.
# The SLAs will be picked up then. It means a delay in reacting to the SLAs (as controlled by the
# min_file_process_interval config) but stops SLAs from DoS'ing the queue.
self.log.debug("Queuing SlaCallbackRequest for %s", request.dag_id)
self._callback_to_execute[request.full_filepath].append(request)
Stats.incr("dag_processing.sla_callback_count")
        # Other callbacks have a higher priority than DAG Run scheduling, so those callbacks gazump, even
        # if the file is already in the file path queue
else:
self.log.debug("Queuing %s CallbackRequest: %s", type(request).__name__, request)
self._callback_to_execute[request.full_filepath].append(request)
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = collections.deque(
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
)
self._add_paths_to_queue([request.full_filepath], True)
Stats.incr("dag_processing.other_callback_count")
def _refresh_dag_dir(self) -> bool:
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
DagFileProcessorManager.clear_nonexistent_import_errors(file_paths=self._file_paths)
except Exception:
self.log.exception("Error removing old import errors")
def _iter_dag_filelocs(fileloc: str) -> Iterator[str]:
"""Get "full" paths to DAGs if inside ZIP files.
This is the format used by the remove/delete functions.
"""
if fileloc.endswith(".py") or not zipfile.is_zipfile(fileloc):
yield fileloc
return
try:
with zipfile.ZipFile(fileloc) as z:
for info in z.infolist():
if might_contain_dag(info.filename, True, z):
yield os.path.join(fileloc, info.filename)
except zipfile.BadZipFile:
self.log.exception("There was an error accessing ZIP file %s %s", fileloc)
dag_filelocs = {full_loc for path in self._file_paths for full_loc in _iter_dag_filelocs(path)}
from airflow.models.dagcode import DagCode
SerializedDagModel.remove_deleted_dags(
alive_dag_filelocs=dag_filelocs,
processor_subdir=self.get_dag_directory(),
)
DagModel.deactivate_deleted_dags(dag_filelocs)
DagCode.remove_deleted_code(dag_filelocs)
return True
return False
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed."""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@staticmethod
@internal_api_call
@provide_session
def clear_nonexistent_import_errors(file_paths: list[str] | None, session=NEW_SESSION):
"""
Clears import errors for files that no longer exist.
:param file_paths: list of paths to DAG definition files
:param session: session for ORM operations
"""
query = session.query(errors.ImportError)
if file_paths:
query = query.filter(~errors.ImportError.filename.in_(file_paths))
query.delete(synchronize_session="fetch")
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, ".")
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f"dag_processing.last_run.seconds_ago.{file_name}", seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path) -> int | None:
"""
Retrieve the PID of the process processing the given file or None if the file is not being processed.
:param file_path: the path to the file that's being processed.
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self) -> list[int]:
"""
Get all pids.
:return: a list of the PIDs for the processors that are running
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path) -> float | None:
"""
Retrieve the last processing time of a specific path.
:param file_path: the path to the file that was processed
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
"""
stat = self._file_stats.get(file_path)
return stat.last_duration.total_seconds() if stat and stat.last_duration else None
def get_last_dag_count(self, file_path) -> int | None:
"""
Retrieve the total DAG count at a specific path.
:param file_path: the path to the file that was processed
:return: the number of dags loaded from that file, or None if the file was never processed.
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path) -> int | None:
"""
Retrieve the total number of errors from processing a specific path.
:param file_path: the path to the file that was processed
:return: the number of import errors from processing, or None if the file was never processed.
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path) -> datetime | None:
"""
Retrieve the last completion time for processing a specific path.
:param file_path: the path to the file that was processed
:return: the finish time of the process of the last run, or None if the file was never processed.
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path) -> datetime | None:
"""
Retrieve the last start time for processing a specific path.
:param file_path: the path to the file that's being processed
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed.
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path) -> int:
"""
The number of times the given file has been parsed.
:param file_path: the path to the file that's being processed.
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def get_dag_directory(self) -> str:
"""Returns the dag_director as a string."""
if isinstance(self._dag_directory, Path):
return str(self._dag_directory.resolve())
else:
return str(self._dag_directory)
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:return: None
"""
self._file_paths = new_file_paths
        # clean up the queues; remove anything queued which is no longer in the list, including callbacks
self._file_path_queue = collections.deque(x for x in self._file_path_queue if x in new_file_paths)
Stats.gauge("dag_processing.file_path_queue_size", len(self._file_path_queue))
callback_paths_to_del = [x for x in self._callback_to_execute if x not in new_file_paths]
for path_to_del in callback_paths_to_del:
del self._callback_to_execute[path_to_del]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr("dag_processing.processes", tags={"file_path": file_path, "action": "stop"})
processor.terminate()
self._file_stats.pop(file_path)
to_remove = set(self._file_stats.keys()) - set(self._file_paths)
for key in to_remove:
# Remove the stats for any dag files that don't exist anymore
del self._file_stats[key]
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr("dag_processing.processes", tags={"file_path": processor.file_path, "action": "finish"})
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
last_duration = last_finish_time - processor.start_time
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=last_duration,
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, ".")
Stats.timing(f"dag_processing.last_duration.{file_name}", last_duration)
Stats.timing("dag_processing.last_duration", last_duration, tags={"file_name": file_name})
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors."""
ready = multiprocessing.connection.wait(
self.waitables.keys() - [self._direct_scheduler_conn], timeout=0
)
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, dag_directory, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_ids=dag_ids,
dag_directory=dag_directory,
callback_requests=callback_requests,
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process."""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.popleft()
            # Stop creating duplicate processors, i.e. processors with the same filepath
            if file_path in self._processors:
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path,
self._pickle_dags,
self._dag_ids,
self.get_dag_directory(),
callback_to_execute_for_file,
)
del self._callback_to_execute[file_path]
Stats.incr("dag_processing.processes", tags={"file_path": file_path, "action": "start"})
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
Stats.gauge("dag_processing.file_path_queue_size", len(self._file_path_queue))
def add_new_file_path_to_queue(self):
for file_path in self.file_paths:
if file_path not in self._file_stats:
                # We found a new file after refreshing the dir; add it to the front of the parsing queue
self.log.info("Adding new file %s to parsing queue", file_path)
self._file_stats[file_path] = DagFileProcessorManager.DEFAULT_FILE_STAT
self._file_path_queue.appendleft(file_path)
def prepare_file_path_queue(self):
"""
Scan dags dir to generate more file paths to process.
Note this method is only called when the file path queue is empty
"""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
file_paths_to_stop_watching = set()
for file_path in self._file_paths:
if is_mtime_mode:
try:
files_with_mtime[file_path] = os.path.getmtime(file_path)
except FileNotFoundError:
self.log.warning("Skipping processing of missing file: %s", file_path)
self._file_stats.pop(file_path, None)
file_paths_to_stop_watching.add(file_path)
continue
file_modified_time = datetime.fromtimestamp(files_with_mtime[file_path], tz=timezone.utc)
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# set of files. Since we set the seed, the sort order will remain same per host
random.Random(get_hostname()).shuffle(file_paths)
if file_paths_to_stop_watching:
self.set_file_paths(
[path for path in self._file_paths if path not in file_paths_to_stop_watching]
)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed,
files_paths_at_run_limit,
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
self._file_stats.setdefault(file_path, DagFileProcessorManager.DEFAULT_FILE_STAT)
self._add_paths_to_queue(files_paths_to_queue, False)
Stats.incr("dag_processing.file_path_queue_update_count")
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
processors_to_remove = []
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr("dag_processing.processes", tags={"file_path": file_path, "action": "timeout"})
Stats.incr("dag_processing.processor_timeouts", tags={"file_path": file_path})
# Deprecated; may be removed in a future Airflow release.
Stats.incr("dag_file_processor_timeouts")
processor.kill()
# Clean up processor references
self.waitables.pop(processor.waitable_handle)
processors_to_remove.append(file_path)
stat = DagFileStat(
num_dags=0,
import_errors=1,
last_finish_time=now,
last_duration=duration,
run_count=self.get_run_count(file_path) + 1,
)
self._file_stats[processor.file_path] = stat
# Clean up `self._processors` after iterating over it
for proc in processors_to_remove:
self._processors.pop(proc)
def _add_paths_to_queue(self, file_paths_to_enqueue: list[str], add_at_front: bool):
"""Adds stuff to the back or front of the file queue, unless it's already present."""
new_file_paths = list(p for p in file_paths_to_enqueue if p not in self._file_path_queue)
if add_at_front:
self._file_path_queue.extendleft(new_file_paths)
else:
self._file_path_queue.extend(new_file_paths)
Stats.gauge("dag_processing.file_path_queue_size", len(self._file_path_queue))
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times."""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""Stops all running processors."""
for processor in self._processors.values():
Stats.decr(
"dag_processing.processes", tags={"file_path": processor.file_path, "action": "terminate"}
)
processor.terminate()
def end(self):
"""Kill all child processes on exit since we don't want to leave them as orphaned."""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge("dag_processing.total_parse_time", parse_time)
Stats.gauge("dagbag_size", sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
"dag_processing.import_errors", sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
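The queue-preparation logic above orders DAG files according to [scheduler] file_parsing_sort_mode before enqueueing them. Below is a minimal standalone sketch of those three orderings; order_file_paths and the use of socket.gethostname() in place of Airflow's get_hostname() are illustrative assumptions, not part of this module.
from __future__ import annotations
import os
import random
import socket
def order_file_paths(file_paths: list[str], list_mode: str) -> list[str]:
    """Order DAG file paths the way prepare_file_path_queue does (sketch)."""
    if list_mode == "modified_time":
        # Newest files first, so recently edited DAG files are parsed sooner.
        return sorted(file_paths, key=os.path.getmtime, reverse=True)
    if list_mode == "alphabetical":
        return sorted(file_paths)
    if list_mode == "random_seeded_by_host":
        # Seeding with the hostname keeps the order stable per host while letting
        # different schedulers work through the files in different orders.
        shuffled = list(file_paths)
        random.Random(socket.gethostname()).shuffle(shuffled)
        return shuffled
    raise ValueError(f"Unknown file_parsing_sort_mode: {list_mode}")
print(order_file_paths(["dags/b.py", "dags/a.py"], "alphabetical"))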
| 56,455 | 42.832298 | 110 |
py
|
airflow
|
airflow-main/airflow/executors/executor_constants.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
LOCAL_EXECUTOR = "LocalExecutor"
LOCAL_KUBERNETES_EXECUTOR = "LocalKubernetesExecutor"
SEQUENTIAL_EXECUTOR = "SequentialExecutor"
CELERY_EXECUTOR = "CeleryExecutor"
CELERY_KUBERNETES_EXECUTOR = "CeleryKubernetesExecutor"
DASK_EXECUTOR = "DaskExecutor"
KUBERNETES_EXECUTOR = "KubernetesExecutor"
DEBUG_EXECUTOR = "DebugExecutor"
MOCK_EXECUTOR = "MockExecutor"
| 1,180 | 41.178571 | 62 |
py
|
airflow
|
airflow-main/airflow/executors/executor_loader.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""All executors."""
from __future__ import annotations
import functools
import logging
import os
from contextlib import suppress
from enum import Enum, unique
from typing import TYPE_CHECKING
from airflow.exceptions import AirflowConfigException
from airflow.executors.executor_constants import (
CELERY_EXECUTOR,
CELERY_KUBERNETES_EXECUTOR,
DASK_EXECUTOR,
DEBUG_EXECUTOR,
KUBERNETES_EXECUTOR,
LOCAL_EXECUTOR,
LOCAL_KUBERNETES_EXECUTOR,
SEQUENTIAL_EXECUTOR,
)
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from airflow.executors.base_executor import BaseExecutor
@unique
class ConnectorSource(Enum):
"""Enum of supported executor import sources."""
CORE = "core"
PLUGIN = "plugin"
CUSTOM_PATH = "custom path"
class ExecutorLoader:
"""Keeps constants for all the currently available executors."""
_default_executor: BaseExecutor | None = None
executors = {
LOCAL_EXECUTOR: "airflow.executors.local_executor.LocalExecutor",
LOCAL_KUBERNETES_EXECUTOR: "airflow.executors.local_kubernetes_executor.LocalKubernetesExecutor",
SEQUENTIAL_EXECUTOR: "airflow.executors.sequential_executor.SequentialExecutor",
CELERY_EXECUTOR: "airflow.providers.celery.executors.celery_executor.CeleryExecutor",
CELERY_KUBERNETES_EXECUTOR: "airflow.providers.celery."
"executors.celery_kubernetes_executor.CeleryKubernetesExecutor",
DASK_EXECUTOR: "airflow.executors.dask_executor.DaskExecutor",
KUBERNETES_EXECUTOR: "airflow.executors.kubernetes_executor.KubernetesExecutor",
DEBUG_EXECUTOR: "airflow.executors.debug_executor.DebugExecutor",
}
@classmethod
def get_default_executor_name(cls) -> str:
"""Returns the default executor name from Airflow configuration.
:return: executor name from Airflow configuration
"""
from airflow.configuration import conf
return conf.get_mandatory_value("core", "EXECUTOR")
@classmethod
def get_default_executor(cls) -> BaseExecutor:
"""Creates a new instance of the configured executor if none exists and returns it."""
if cls._default_executor is not None:
return cls._default_executor
return cls.load_executor(cls.get_default_executor_name())
@classmethod
def load_executor(cls, executor_name: str) -> BaseExecutor:
"""
Loads the executor.
This supports the following formats:
* by executor name for core executor
* by ``{plugin_name}.{class_name}`` for executor from plugins
* by import path.
:return: an instance of executor class via executor_name
"""
if executor_name == CELERY_KUBERNETES_EXECUTOR:
return cls.__load_celery_kubernetes_executor()
elif executor_name == LOCAL_KUBERNETES_EXECUTOR:
return cls.__load_local_kubernetes_executor()
try:
executor_cls, import_source = cls.import_executor_cls(executor_name)
log.debug("Loading executor %s from %s", executor_name, import_source.value)
except ImportError as e:
log.error(e)
raise AirflowConfigException(
f'The module/attribute could not be loaded. Please check "executor" key in "core" section. '
f'Current value: "{executor_name}".'
)
log.info("Loaded executor: %s", executor_name)
return executor_cls()
@classmethod
def import_executor_cls(cls, executor_name: str) -> tuple[type[BaseExecutor], ConnectorSource]:
"""
Imports the executor class.
Supports the same formats as ExecutorLoader.load_executor.
:return: executor class via executor_name and executor import source
"""
def _import_and_validate(path: str) -> type[BaseExecutor]:
executor = import_string(path)
cls.validate_database_executor_compatibility(executor)
return executor
if executor_name in cls.executors:
return _import_and_validate(cls.executors[executor_name]), ConnectorSource.CORE
if executor_name.count(".") == 1:
log.debug(
"The executor name looks like the plugin path (executor_name=%s). Trying to import a "
"executor from a plugin",
executor_name,
)
with suppress(ImportError, AttributeError):
# Load plugins here for executors as at that time the plugins might not have been
# initialized yet
from airflow import plugins_manager
plugins_manager.integrate_executor_plugins()
return _import_and_validate(f"airflow.executors.{executor_name}"), ConnectorSource.PLUGIN
return _import_and_validate(executor_name), ConnectorSource.CUSTOM_PATH
@classmethod
def import_default_executor_cls(cls) -> tuple[type[BaseExecutor], ConnectorSource]:
"""
Imports the default executor class.
:return: executor class and executor import source
"""
executor_name = cls.get_default_executor_name()
executor, source = cls.import_executor_cls(executor_name)
return executor, source
@classmethod
@functools.lru_cache(maxsize=None)
def validate_database_executor_compatibility(cls, executor: type[BaseExecutor]) -> None:
"""Validate database and executor compatibility.
Most of the databases work universally, but SQLite can only work with
single-threaded executors (e.g. Sequential).
This is NOT done in ``airflow.configuration`` (when configuration is
initialized) because loading the executor class is heavy work we want to
avoid unless needed.
"""
# Single threaded executors can run with any backend.
if executor.is_single_threaded:
return
# This is set in tests when we want to be able to use SQLite.
if os.environ.get("_AIRFLOW__SKIP_DATABASE_EXECUTOR_COMPATIBILITY_CHECK") == "1":
return
from airflow.settings import engine
# SQLite only works with single threaded executors
if engine.dialect.name == "sqlite":
raise AirflowConfigException(f"error: cannot use SQLite with the {executor.__name__}")
@classmethod
def __load_celery_kubernetes_executor(cls) -> BaseExecutor:
celery_executor = import_string(cls.executors[CELERY_EXECUTOR])()
kubernetes_executor = import_string(cls.executors[KUBERNETES_EXECUTOR])()
celery_kubernetes_executor_cls = import_string(cls.executors[CELERY_KUBERNETES_EXECUTOR])
return celery_kubernetes_executor_cls(celery_executor, kubernetes_executor)
@classmethod
def __load_local_kubernetes_executor(cls) -> BaseExecutor:
local_executor = import_string(cls.executors[LOCAL_EXECUTOR])()
kubernetes_executor = import_string(cls.executors[KUBERNETES_EXECUTOR])()
local_kubernetes_executor_cls = import_string(cls.executors[LOCAL_KUBERNETES_EXECUTOR])
return local_kubernetes_executor_cls(local_executor, kubernetes_executor)
# This tuple is deprecated due to AIP-51 and is no longer used in core Airflow.
# TODO: Remove in Airflow 3.0
UNPICKLEABLE_EXECUTORS = (
LOCAL_EXECUTOR,
SEQUENTIAL_EXECUTOR,
DASK_EXECUTOR,
)
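ExecutorLoader.load_executor above accepts a core executor name, a "{plugin_name}.{class_name}" reference, or a full import path. The snippet below is a minimal usage sketch assuming a default Airflow installation; the plugin and custom-path names in the comments are hypothetical and only illustrate the accepted formats.
from airflow.executors.executor_loader import ExecutorLoader
# Core executor, referenced by the constant name defined in executor_constants.
executor = ExecutorLoader.load_executor("SequentialExecutor")
print(type(executor).__name__)
# Hypothetical plugin executor, referenced as "{plugin_name}.{class_name}":
# ExecutorLoader.load_executor("my_plugin.MyExecutor")
# Hypothetical executor referenced by a full import path:
# ExecutorLoader.load_executor("my_company.executors.MyExecutor")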
| 8,240 | 37.872642 | 108 |
py
|
airflow
|
airflow-main/airflow/executors/sequential_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
SequentialExecutor.
.. seealso::
For more information on how the SequentialExecutor works, take a look at the guide:
:ref:`executor:SequentialExecutor`
"""
from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING, Any
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
from airflow.models.taskinstancekey import TaskInstanceKey
class SequentialExecutor(BaseExecutor):
"""
This executor will only run one task instance at a time.
It can be used for debugging. It is also the only executor
that can be used with sqlite since sqlite doesn't support
multiple connections.
    Since we want Airflow to work out of the box, it defaults to this
    SequentialExecutor alongside SQLite when you first install it.
"""
supports_pickling: bool = False
is_local: bool = True
is_single_threaded: bool = True
is_production: bool = False
serve_logs: bool = True
def __init__(self):
super().__init__()
self.commands_to_run = []
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
self.validate_airflow_tasks_run_command(command)
self.commands_to_run.append((key, command))
def sync(self) -> None:
for key, command in self.commands_to_run:
self.log.info("Executing command: %s", command)
try:
subprocess.check_call(command, close_fds=True)
self.change_state(key, State.SUCCESS)
except subprocess.CalledProcessError as e:
self.change_state(key, State.FAILED)
self.log.error("Failed to execute task %s.", str(e))
self.commands_to_run = []
def end(self):
"""End the executor."""
self.heartbeat()
def terminate(self):
"""Terminate the executor is not doing anything."""
| 2,889 | 30.758242 | 87 |
py
|
airflow
|
airflow-main/airflow/executors/local_kubernetes_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.executors.kubernetes_executor import KubernetesExecutor
from airflow.executors.local_executor import LocalExecutor
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType, EventBufferValueType, QueuedTaskInstanceType
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
class LocalKubernetesExecutor(LoggingMixin):
"""
Chooses between LocalExecutor and KubernetesExecutor based on the queue defined on the task.
When the task's queue is the value of ``kubernetes_queue`` in section ``[local_kubernetes_executor]``
    of the configuration (default value: `kubernetes`), KubernetesExecutor is selected to run the task;
    otherwise, LocalExecutor is used.
"""
supports_ad_hoc_ti_run: bool = True
supports_pickling: bool = False
supports_sentry: bool = False
is_local: bool = False
is_single_threaded: bool = False
is_production: bool = True
serve_logs: bool = True
change_sensor_mode_to_reschedule: bool = False
callback_sink: BaseCallbackSink | None = None
KUBERNETES_QUEUE = conf.get("local_kubernetes_executor", "kubernetes_queue")
def __init__(self, local_executor: LocalExecutor, kubernetes_executor: KubernetesExecutor):
super().__init__()
self._job_id: str | None = None
self.local_executor = local_executor
self.kubernetes_executor = kubernetes_executor
self.kubernetes_executor.kubernetes_queue = self.KUBERNETES_QUEUE
@property
def queued_tasks(self) -> dict[TaskInstanceKey, QueuedTaskInstanceType]:
"""Return queued tasks from local and kubernetes executor."""
queued_tasks = self.local_executor.queued_tasks.copy()
queued_tasks.update(self.kubernetes_executor.queued_tasks)
return queued_tasks
@property
def running(self) -> set[TaskInstanceKey]:
"""Return running tasks from local and kubernetes executor."""
return self.local_executor.running.union(self.kubernetes_executor.running)
@property
def job_id(self) -> str | None:
"""
Inherited attribute from BaseExecutor.
        Since this is not really an executor, but a wrapper of executors,
        we implement it as a property so we can have a custom setter.
"""
return self._job_id
@job_id.setter
def job_id(self, value: str | None) -> None:
"""Expose job ID for SchedulerJob."""
self._job_id = value
self.kubernetes_executor.job_id = value
self.local_executor.job_id = value
def start(self) -> None:
"""Start local and kubernetes executor."""
self.log.info("Starting local and Kubernetes Executor")
self.local_executor.start()
self.kubernetes_executor.start()
@property
def slots_available(self) -> int:
"""Number of new tasks this executor instance can accept."""
return self.local_executor.slots_available
def queue_command(
self,
task_instance: TaskInstance,
command: CommandType,
priority: int = 1,
queue: str | None = None,
) -> None:
"""Queues command via local or kubernetes executor."""
executor = self._router(task_instance)
self.log.debug("Using executor: %s for %s", executor.__class__.__name__, task_instance.key)
executor.queue_command(task_instance, command, priority, queue)
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: int | None = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: str | None = None,
cfg_path: str | None = None,
) -> None:
"""Queues task instance via local or kubernetes executor."""
from airflow.models.taskinstance import SimpleTaskInstance
executor = self._router(SimpleTaskInstance.from_ti(task_instance))
self.log.debug(
"Using executor: %s to queue_task_instance for %s", executor.__class__.__name__, task_instance.key
)
executor.queue_task_instance(
task_instance=task_instance,
mark_success=mark_success,
pickle_id=pickle_id,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
pool=pool,
cfg_path=cfg_path,
)
def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]:
"""Fetch task log from kubernetes executor."""
if ti.queue == self.kubernetes_executor.kubernetes_queue:
return self.kubernetes_executor.get_task_log(ti=ti, try_number=try_number)
return [], []
def has_task(self, task_instance: TaskInstance) -> bool:
"""
Checks if a task is either queued or running in either local or kubernetes executor.
:param task_instance: TaskInstance
:return: True if the task is known to this executor
"""
return self.local_executor.has_task(task_instance) or self.kubernetes_executor.has_task(task_instance)
def heartbeat(self) -> None:
"""Heartbeat sent to trigger new jobs in local and kubernetes executor."""
self.local_executor.heartbeat()
self.kubernetes_executor.heartbeat()
def get_event_buffer(
self, dag_ids: list[str] | None = None
) -> dict[TaskInstanceKey, EventBufferValueType]:
"""
Return and flush the event buffer from local and kubernetes executor.
:param dag_ids: dag_ids to return events for, if None returns all
:return: a dict of events
"""
cleared_events_from_local = self.local_executor.get_event_buffer(dag_ids)
cleared_events_from_kubernetes = self.kubernetes_executor.get_event_buffer(dag_ids)
return {**cleared_events_from_local, **cleared_events_from_kubernetes}
def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
"""
Try to adopt running task instances that have been abandoned by a SchedulerJob dying.
Anything that is not adopted will be cleared by the scheduler (and then become eligible for
re-scheduling)
:return: any TaskInstances that were unable to be adopted
"""
local_tis = [ti for ti in tis if ti.queue != self.KUBERNETES_QUEUE]
kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
return [
*self.local_executor.try_adopt_task_instances(local_tis),
*self.kubernetes_executor.try_adopt_task_instances(kubernetes_tis),
]
def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]:
# LocalExecutor doesn't have a cleanup_stuck_queued_tasks method, so we
# will only run KubernetesExecutor's
kubernetes_tis = [ti for ti in tis if ti.queue == self.KUBERNETES_QUEUE]
return self.kubernetes_executor.cleanup_stuck_queued_tasks(kubernetes_tis)
def end(self) -> None:
"""End local and kubernetes executor."""
self.local_executor.end()
self.kubernetes_executor.end()
def terminate(self) -> None:
"""Terminate local and kubernetes executor."""
self.local_executor.terminate()
self.kubernetes_executor.terminate()
def _router(self, simple_task_instance: SimpleTaskInstance) -> LocalExecutor | KubernetesExecutor:
"""
Return either local_executor or kubernetes_executor.
:param simple_task_instance: SimpleTaskInstance
:return: local_executor or kubernetes_executor
"""
if simple_task_instance.queue == self.KUBERNETES_QUEUE:
return self.kubernetes_executor
return self.local_executor
def debug_dump(self) -> None:
"""Called in response to SIGUSR2 by the scheduler."""
self.log.info("Dumping LocalExecutor state")
self.local_executor.debug_dump()
self.log.info("Dumping KubernetesExecutor state")
self.kubernetes_executor.debug_dump()
def send_callback(self, request: CallbackRequest) -> None:
"""Sends callback for execution.
:param request: Callback request to be executed.
"""
if not self.callback_sink:
raise ValueError("Callback sink is not ready.")
self.callback_sink.send(request)
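The routing performed by _router above reduces to a single queue comparison. Below is a minimal sketch of that rule assuming the default kubernetes queue name; route() is a toy function that returns a label rather than an executor instance.
from __future__ import annotations
KUBERNETES_QUEUE = "kubernetes"  # default value of [local_kubernetes_executor] kubernetes_queue
def route(task_queue: str | None) -> str:
    # Tasks on the configured kubernetes queue go to KubernetesExecutor,
    # everything else is handled by LocalExecutor.
    return "KubernetesExecutor" if task_queue == KUBERNETES_QUEUE else "LocalExecutor"
assert route("kubernetes") == "KubernetesExecutor"
assert route("default") == "LocalExecutor"
assert route(None) == "LocalExecutor"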
| 9,849 | 39.534979 | 110 |
py
|
airflow
|
airflow-main/airflow/executors/kubernetes_executor_types.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
from airflow.models.taskinstance import TaskInstanceKey
# TaskInstance key, command, configuration, pod_template_file
KubernetesJobType = Tuple[TaskInstanceKey, CommandType, Any, Optional[str]]
# key, pod state, pod_name, namespace, resource_version
KubernetesResultsType = Tuple[TaskInstanceKey, Optional[str], str, str, str]
# pod_name, namespace, pod state, annotations, resource_version
KubernetesWatchType = Tuple[str, str, Optional[str], Dict[str, str], str]
ALL_NAMESPACES = "ALL_NAMESPACES"
POD_EXECUTOR_DONE_KEY = "airflow_executor_done"
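A small illustrative sketch of the tuple shape behind KubernetesWatchType; every concrete value below is made up for the example.
from __future__ import annotations
# (pod_name, namespace, pod state, annotations, resource_version)
watch_event = (
    "example-task-pod-abc123",  # hypothetical pod name
    "airflow",                  # namespace
    "failed",                   # pod state; None is used when the pod succeeded
    {"dag_id": "example_dag", "task_id": "example_task", "try_number": "1"},
    "12345",                    # resource_version
)
pod_name, namespace, state, annotations, resource_version = watch_event
print(pod_name, state)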
| 1,540 | 41.805556 | 80 |
py
|
airflow
|
airflow-main/airflow/executors/dask_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DaskExecutor.
.. seealso::
For more information on how the DaskExecutor works, take a look at the guide:
:ref:`executor:DaskExecutor`
"""
from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING, Any
from distributed import Client, Future, as_completed
from distributed.security import Security
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
from airflow.models.taskinstancekey import TaskInstanceKey
# queue="default" is a special case since this is the base config default queue name,
# with respect to DaskExecutor, treat it as if no queue is provided
_UNDEFINED_QUEUES = {None, "default"}
class DaskExecutor(BaseExecutor):
"""DaskExecutor submits tasks to a Dask Distributed cluster."""
supports_pickling: bool = False
def __init__(self, cluster_address=None):
super().__init__(parallelism=0)
if cluster_address is None:
cluster_address = conf.get("dask", "cluster_address")
if not cluster_address:
raise ValueError("Please provide a Dask cluster address in airflow.cfg")
self.cluster_address = cluster_address
# ssl / tls parameters
self.tls_ca = conf.get("dask", "tls_ca")
self.tls_key = conf.get("dask", "tls_key")
self.tls_cert = conf.get("dask", "tls_cert")
self.client: Client | None = None
self.futures: dict[Future, TaskInstanceKey] | None = None
def start(self) -> None:
if self.tls_ca or self.tls_key or self.tls_cert:
security = Security(
tls_client_key=self.tls_key,
tls_client_cert=self.tls_cert,
tls_ca_file=self.tls_ca,
require_encryption=True,
)
else:
security = None
self.client = Client(self.cluster_address, security=security)
self.futures = {}
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
if TYPE_CHECKING:
assert self.client
self.validate_airflow_tasks_run_command(command)
def airflow_run():
return subprocess.check_call(command, close_fds=True)
resources = None
if queue not in _UNDEFINED_QUEUES:
scheduler_info = self.client.scheduler_info()
avail_queues = {
resource for d in scheduler_info["workers"].values() for resource in d["resources"]
}
if queue not in avail_queues:
raise AirflowException(f"Attempted to submit task to an unavailable queue: '{queue}'")
resources = {queue: 1}
future = self.client.submit(subprocess.check_call, command, pure=False, resources=resources)
self.futures[future] = key # type: ignore
def _process_future(self, future: Future) -> None:
if TYPE_CHECKING:
assert self.futures
if future.done():
key = self.futures[future]
if future.exception():
self.log.error("Failed to execute task: %s", repr(future.exception()))
self.fail(key)
elif future.cancelled():
self.log.error("Failed to execute task")
self.fail(key)
else:
self.success(key)
self.futures.pop(future)
def sync(self) -> None:
if TYPE_CHECKING:
assert self.futures
# make a copy so futures can be popped during iteration
for future in self.futures.copy():
self._process_future(future)
def end(self) -> None:
if TYPE_CHECKING:
assert self.client
assert self.futures
self.client.cancel(list(self.futures.keys()))
for future in as_completed(self.futures.copy()):
self._process_future(future)
def terminate(self):
if TYPE_CHECKING:
assert self.futures
self.client.cancel(self.futures.keys())
self.end()
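execute_async above maps any non-default Airflow queue onto a Dask worker resource, so the task only lands on workers that advertise that resource. Below is a minimal sketch of that mapping; the scheduler address, the "gpu" queue name, and the task identifiers are hypothetical.
from __future__ import annotations
import subprocess
from distributed import Client
client = Client("tcp://dask-scheduler:8786")  # hypothetical cluster address
command = ["airflow", "tasks", "run", "example_dag", "example_task", "manual__2024-01-01T00:00:00"]
# An Airflow queue named "gpu" becomes resources={"gpu": 1}; only workers started
# with a matching --resources "gpu=1" declaration will accept this future.
future = client.submit(subprocess.check_call, command, pure=False, resources={"gpu": 1})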
| 5,049 | 33.121622 | 102 |
py
|
airflow
|
airflow-main/airflow/executors/kubernetes_executor_utils.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import multiprocessing
import time
from queue import Empty, Queue
from typing import TYPE_CHECKING, Any
from kubernetes import client, watch
from kubernetes.client import Configuration, models as k8s
from kubernetes.client.rest import ApiException
from urllib3.exceptions import ReadTimeoutError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kubernetes_helper_functions import (
annotations_for_logging_task_metadata,
annotations_to_key,
create_pod_id,
)
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.singleton import Singleton
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.executors.kubernetes_executor_types import (
KubernetesJobType,
KubernetesResultsType,
KubernetesWatchType,
)
from airflow.executors.kubernetes_executor_types import ALL_NAMESPACES, POD_EXECUTOR_DONE_KEY
class ResourceVersion(metaclass=Singleton):
"""Singleton for tracking resourceVersion from Kubernetes."""
resource_version: dict[str, str] = {}
class KubernetesJobWatcher(multiprocessing.Process, LoggingMixin):
"""Watches for Kubernetes jobs."""
def __init__(
self,
namespace: str,
watcher_queue: Queue[KubernetesWatchType],
resource_version: str | None,
scheduler_job_id: str,
kube_config: Configuration,
):
super().__init__()
self.namespace = namespace
self.scheduler_job_id = scheduler_job_id
self.watcher_queue = watcher_queue
self.resource_version = resource_version
self.kube_config = kube_config
def run(self) -> None:
"""Performs watching."""
if TYPE_CHECKING:
assert self.scheduler_job_id
kube_client: client.CoreV1Api = get_kube_client()
while True:
try:
self.resource_version = self._run(
kube_client, self.resource_version, self.scheduler_job_id, self.kube_config
)
except ReadTimeoutError:
self.log.warning(
"There was a timeout error accessing the Kube API. Retrying request.", exc_info=True
)
time.sleep(1)
except Exception:
self.log.exception("Unknown error in KubernetesJobWatcher. Failing")
self.resource_version = "0"
ResourceVersion().resource_version[self.namespace] = "0"
raise
else:
self.log.warning(
"Watch died gracefully, starting back up with: last resource_version: %s",
self.resource_version,
)
def _pod_events(self, kube_client: client.CoreV1Api, query_kwargs: dict):
watcher = watch.Watch()
try:
if self.namespace == ALL_NAMESPACES:
return watcher.stream(kube_client.list_pod_for_all_namespaces, **query_kwargs)
else:
return watcher.stream(kube_client.list_namespaced_pod, self.namespace, **query_kwargs)
except ApiException as e:
if e.status == 410: # Resource version is too old
if self.namespace == ALL_NAMESPACES:
pods = kube_client.list_pod_for_all_namespaces(watch=False)
else:
pods = kube_client.list_namespaced_pod(namespace=self.namespace, watch=False)
resource_version = pods.metadata.resource_version
query_kwargs["resource_version"] = resource_version
return self._pod_events(kube_client=kube_client, query_kwargs=query_kwargs)
else:
raise
def _run(
self,
kube_client: client.CoreV1Api,
resource_version: str | None,
scheduler_job_id: str,
kube_config: Any,
) -> str | None:
self.log.info("Event: and now my watch begins starting at resource_version: %s", resource_version)
kwargs = {"label_selector": f"airflow-worker={scheduler_job_id}"}
if resource_version:
kwargs["resource_version"] = resource_version
if kube_config.kube_client_request_args:
for key, value in kube_config.kube_client_request_args.items():
kwargs[key] = value
last_resource_version: str | None = None
for event in self._pod_events(kube_client=kube_client, query_kwargs=kwargs):
task = event["object"]
self.log.debug("Event: %s had an event of type %s", task.metadata.name, event["type"])
if event["type"] == "ERROR":
return self.process_error(event)
annotations = task.metadata.annotations
task_instance_related_annotations = {
"dag_id": annotations["dag_id"],
"task_id": annotations["task_id"],
"execution_date": annotations.get("execution_date"),
"run_id": annotations.get("run_id"),
"try_number": annotations["try_number"],
}
map_index = annotations.get("map_index")
if map_index is not None:
task_instance_related_annotations["map_index"] = map_index
self.process_status(
pod_name=task.metadata.name,
namespace=task.metadata.namespace,
status=task.status.phase,
annotations=task_instance_related_annotations,
resource_version=task.metadata.resource_version,
event=event,
)
last_resource_version = task.metadata.resource_version
return last_resource_version
def process_error(self, event: Any) -> str:
"""Process error response."""
self.log.error("Encountered Error response from k8s list namespaced pod stream => %s", event)
raw_object = event["raw_object"]
if raw_object["code"] == 410:
self.log.info(
"Kubernetes resource version is too old, must reset to 0 => %s", (raw_object["message"],)
)
# Return resource version 0
return "0"
raise AirflowException(
f"Kubernetes failure for {raw_object['reason']} with code {raw_object['code']} and message: "
f"{raw_object['message']}"
)
def process_status(
self,
pod_name: str,
namespace: str,
status: str,
annotations: dict[str, str],
resource_version: str,
event: Any,
) -> None:
pod = event["object"]
annotations_string = annotations_for_logging_task_metadata(annotations)
"""Process status response."""
if status == "Pending":
            # deletion_timestamp is set by the kube server when a graceful deletion is requested.
            # Since the kube server has received a request to delete the pod, set the TI state to failed.
if event["type"] == "DELETED" and pod.metadata.deletion_timestamp:
self.log.info("Event: Failed to start pod %s, annotations: %s", pod_name, annotations_string)
self.watcher_queue.put((pod_name, namespace, State.FAILED, annotations, resource_version))
else:
self.log.debug("Event: %s Pending, annotations: %s", pod_name, annotations_string)
elif status == "Failed":
self.log.error("Event: %s Failed, annotations: %s", pod_name, annotations_string)
self.watcher_queue.put((pod_name, namespace, State.FAILED, annotations, resource_version))
elif status == "Succeeded":
# We get multiple events once the pod hits a terminal state, and we only want to
# send it along to the scheduler once.
# If our event type is DELETED, we have the POD_EXECUTOR_DONE_KEY, or the pod has
# a deletion timestamp, we've already seen the initial Succeeded event and sent it
# along to the scheduler.
if (
event["type"] == "DELETED"
or POD_EXECUTOR_DONE_KEY in pod.metadata.labels
or pod.metadata.deletion_timestamp
):
self.log.info(
"Skipping event for Succeeded pod %s - event for this pod already sent to executor",
pod_name,
)
return
self.log.info("Event: %s Succeeded, annotations: %s", pod_name, annotations_string)
self.watcher_queue.put((pod_name, namespace, None, annotations, resource_version))
elif status == "Running":
            # deletion_timestamp is set by the kube server when a graceful deletion is requested.
            # Since the kube server has received a request to delete the pod, set the TI state to failed.
if event["type"] == "DELETED" and pod.metadata.deletion_timestamp:
self.log.info(
"Event: Pod %s deleted before it could complete, annotations: %s",
pod_name,
annotations_string,
)
self.watcher_queue.put((pod_name, namespace, State.FAILED, annotations, resource_version))
else:
self.log.info("Event: %s is Running, annotations: %s", pod_name, annotations_string)
else:
self.log.warning(
"Event: Invalid state: %s on pod: %s in namespace %s with annotations: %s with "
"resource_version: %s",
status,
pod_name,
namespace,
annotations,
resource_version,
)
class AirflowKubernetesScheduler(LoggingMixin):
"""Airflow Scheduler for Kubernetes."""
def __init__(
self,
kube_config: Any,
result_queue: Queue[KubernetesResultsType],
kube_client: client.CoreV1Api,
scheduler_job_id: str,
):
super().__init__()
self.log.debug("Creating Kubernetes executor")
self.kube_config = kube_config
self.result_queue = result_queue
self.namespace = self.kube_config.kube_namespace
self.log.debug("Kubernetes using namespace %s", self.namespace)
self.kube_client = kube_client
self._manager = multiprocessing.Manager()
self.watcher_queue = self._manager.Queue()
self.scheduler_job_id = scheduler_job_id
self.kube_watchers = self._make_kube_watchers()
def run_pod_async(self, pod: k8s.V1Pod, **kwargs):
"""Runs POD asynchronously."""
sanitized_pod = self.kube_client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug("Pod Creation Request: \n%s", json_pod)
try:
resp = self.kube_client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug("Pod Creation Response: %s", resp)
except Exception as e:
self.log.exception("Exception when attempting to create Namespaced Pod: %s", json_pod)
raise e
return resp
def _make_kube_watcher(self, namespace) -> KubernetesJobWatcher:
resource_version = ResourceVersion().resource_version.get(namespace, "0")
watcher = KubernetesJobWatcher(
watcher_queue=self.watcher_queue,
namespace=namespace,
resource_version=resource_version,
scheduler_job_id=self.scheduler_job_id,
kube_config=self.kube_config,
)
watcher.start()
return watcher
def _make_kube_watchers(self) -> dict[str, KubernetesJobWatcher]:
watchers = {}
if self.kube_config.multi_namespace_mode:
namespaces_to_watch = (
self.kube_config.multi_namespace_mode_namespace_list
if self.kube_config.multi_namespace_mode_namespace_list
else [ALL_NAMESPACES]
)
else:
namespaces_to_watch = [self.kube_config.kube_namespace]
for namespace in namespaces_to_watch:
watchers[namespace] = self._make_kube_watcher(namespace)
return watchers
def _health_check_kube_watchers(self):
for namespace, kube_watcher in self.kube_watchers.items():
if kube_watcher.is_alive():
self.log.debug("KubeJobWatcher for namespace %s alive, continuing", namespace)
else:
self.log.error(
(
"Error while health checking kube watcher process for namespace %s. "
"Process died for unknown reasons"
),
namespace,
)
ResourceVersion().resource_version[namespace] = "0"
self.kube_watchers[namespace] = self._make_kube_watcher(namespace)
def run_next(self, next_job: KubernetesJobType) -> None:
"""Receives the next job to run, builds the pod, and creates it."""
key, command, kube_executor_config, pod_template_file = next_job
dag_id, task_id, run_id, try_number, map_index = key
if command[0:3] != ["airflow", "tasks", "run"]:
raise ValueError('The command must start with ["airflow", "tasks", "run"].')
base_worker_pod = get_base_pod_from_template(pod_template_file, self.kube_config)
if not base_worker_pod:
raise AirflowException(
f"could not find a valid worker template yaml at {self.kube_config.pod_template_file}"
)
pod = PodGenerator.construct_pod(
namespace=self.namespace,
scheduler_job_id=self.scheduler_job_id,
pod_id=create_pod_id(dag_id, task_id),
dag_id=dag_id,
task_id=task_id,
kube_image=self.kube_config.kube_image,
try_number=try_number,
map_index=map_index,
date=None,
run_id=run_id,
args=command,
pod_override_object=kube_executor_config,
base_worker_pod=base_worker_pod,
with_mutation_hook=True,
)
# Reconcile the pod generated by the Operator and the Pod
# generated by the .cfg file
self.log.info(
"Creating kubernetes pod for job is %s, with pod name %s, annotations: %s",
key,
pod.metadata.name,
annotations_for_logging_task_metadata(pod.metadata.annotations),
)
self.log.debug("Kubernetes running for command %s", command)
self.log.debug("Kubernetes launching image %s", pod.spec.containers[0].image)
# the watcher will monitor pods, so we do not block.
self.run_pod_async(pod, **self.kube_config.kube_client_request_args)
self.log.debug("Kubernetes Job created!")
def delete_pod(self, pod_name: str, namespace: str) -> None:
"""Deletes Pod from a namespace. Does not raise if it does not exist."""
try:
self.log.debug("Deleting pod %s in namespace %s", pod_name, namespace)
self.kube_client.delete_namespaced_pod(
pod_name,
namespace,
body=client.V1DeleteOptions(**self.kube_config.delete_option_kwargs),
**self.kube_config.kube_client_request_args,
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def patch_pod_executor_done(self, *, pod_name: str, namespace: str):
"""Add a "done" annotation to ensure we don't continually adopt pods."""
self.log.debug("Patching pod %s in namespace %s to mark it as done", pod_name, namespace)
try:
self.kube_client.patch_namespaced_pod(
name=pod_name,
namespace=namespace,
body={"metadata": {"labels": {POD_EXECUTOR_DONE_KEY: "True"}}},
)
except ApiException as e:
self.log.info("Failed to patch pod %s with done annotation. Reason: %s", pod_name, e)
def sync(self) -> None:
"""
Checks the status of all currently running kubernetes jobs.
If a job is completed, its status is placed in the result queue to be sent back to the scheduler.
"""
self.log.debug("Syncing KubernetesExecutor")
self._health_check_kube_watchers()
while True:
try:
task = self.watcher_queue.get_nowait()
try:
self.log.debug("Processing task %s", task)
self.process_watcher_task(task)
finally:
self.watcher_queue.task_done()
except Empty:
break
def process_watcher_task(self, task: KubernetesWatchType) -> None:
"""Process the task by watcher."""
pod_name, namespace, state, annotations, resource_version = task
self.log.debug(
"Attempting to finish pod; pod_name: %s; state: %s; annotations: %s",
pod_name,
state,
annotations_for_logging_task_metadata(annotations),
)
key = annotations_to_key(annotations=annotations)
if key:
self.log.debug("finishing job %s - %s (%s)", key, state, pod_name)
self.result_queue.put((key, state, pod_name, namespace, resource_version))
def _flush_watcher_queue(self) -> None:
self.log.debug("Executor shutting down, watcher_queue approx. size=%d", self.watcher_queue.qsize())
while True:
try:
task = self.watcher_queue.get_nowait()
# Ignoring it since it can only have either FAILED or SUCCEEDED pods
self.log.warning("Executor shutting down, IGNORING watcher task=%s", task)
self.watcher_queue.task_done()
except Empty:
break
def terminate(self) -> None:
"""Terminates the watcher."""
self.log.debug("Terminating kube_watchers...")
for namespace, kube_watcher in self.kube_watchers.items():
kube_watcher.terminate()
kube_watcher.join()
self.log.debug("kube_watcher=%s", kube_watcher)
self.log.debug("Flushing watcher_queue...")
self._flush_watcher_queue()
# Queue should be empty...
self.watcher_queue.join()
self.log.debug("Shutting down manager...")
self._manager.shutdown()
def get_base_pod_from_template(pod_template_file: str | None, kube_config: Any) -> k8s.V1Pod:
"""
Get base pod from template.
Reads either the pod_template_file set in the executor_config or the base pod_template_file
set in the airflow.cfg to craft a "base pod" that will be used by the KubernetesExecutor
:param pod_template_file: absolute path to a pod_template_file.yaml or None
:param kube_config: The KubeConfig class generated by airflow that contains all kube metadata
:return: a V1Pod that can be used as the base pod for k8s tasks
"""
if pod_template_file:
return PodGenerator.deserialize_model_file(pod_template_file)
else:
return PodGenerator.deserialize_model_file(kube_config.pod_template_file)
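get_base_pod_from_template above falls back from a per-task pod_template_file to the one configured in airflow.cfg. Below is a minimal sketch of loading a template directly with the same helper; the template path is hypothetical.
from airflow.kubernetes.pod_generator import PodGenerator
# Hypothetical pod template path; in the executor this comes from executor_config
# or from the pod_template_file setting in airflow.cfg.
base_pod = PodGenerator.deserialize_model_file("/opt/airflow/pod_templates/worker.yaml")
print(type(base_pod).__name__)  # V1Pod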
| 20,249 | 41.364017 | 109 |
py
|
airflow
|
airflow-main/airflow/executors/kubernetes_executor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
KubernetesExecutor.
.. seealso::
For more information on how the KubernetesExecutor works, take a look at the guide:
:ref:`executor:KubernetesExecutor`
"""
from __future__ import annotations
import json
import logging
import multiprocessing
import time
from collections import defaultdict
from contextlib import suppress
from datetime import datetime
from queue import Empty, Queue
from typing import TYPE_CHECKING, Any, Sequence
from sqlalchemy.orm import Session
from airflow.configuration import conf
from airflow.exceptions import PodMutationHookException, PodReconciliationError
from airflow.executors.base_executor import BaseExecutor
from airflow.executors.kubernetes_executor_types import POD_EXECUTOR_DONE_KEY
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import annotations_to_key
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import remove_escape_codes
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import State, TaskInstanceState
if TYPE_CHECKING:
from kubernetes import client
from kubernetes.client import models as k8s
from airflow.executors.base_executor import CommandType
from airflow.executors.kubernetes_executor_types import (
KubernetesJobType,
KubernetesResultsType,
)
from airflow.executors.kubernetes_executor_utils import AirflowKubernetesScheduler
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskinstancekey import TaskInstanceKey
class KubernetesExecutor(BaseExecutor):
"""Executor for Kubernetes."""
supports_ad_hoc_ti_run: bool = True
def __init__(self):
self.kube_config = KubeConfig()
self._manager = multiprocessing.Manager()
self.task_queue: Queue[KubernetesJobType] = self._manager.Queue()
self.result_queue: Queue[KubernetesResultsType] = self._manager.Queue()
self.kube_scheduler: AirflowKubernetesScheduler | None = None
self.kube_client: client.CoreV1Api | None = None
self.scheduler_job_id: str | None = None
self.event_scheduler: EventScheduler | None = None
self.last_handled: dict[TaskInstanceKey, float] = {}
self.kubernetes_queue: str | None = None
super().__init__(parallelism=self.kube_config.parallelism)
def _list_pods(self, query_kwargs):
if self.kube_config.multi_namespace_mode:
if self.kube_config.multi_namespace_mode_namespace_list:
pods = []
for namespace in self.kube_config.multi_namespace_mode_namespace_list:
pods.extend(
self.kube_client.list_namespaced_pod(namespace=namespace, **query_kwargs).items
)
else:
pods = self.kube_client.list_pod_for_all_namespaces(**query_kwargs).items
else:
pods = self.kube_client.list_namespaced_pod(
namespace=self.kube_config.kube_namespace, **query_kwargs
).items
return pods
def _make_safe_label_value(self, input_value: str | datetime) -> str:
"""
Normalize a provided label to be of valid length and characters.
See airflow.kubernetes.pod_generator.make_safe_label_value for more details.
"""
# airflow.kubernetes is an expensive import, locally import it here to
# speed up load times of the kubernetes_executor module.
from airflow.kubernetes import pod_generator
if isinstance(input_value, datetime):
return pod_generator.datetime_to_label_safe_datestring(input_value)
return pod_generator.make_safe_label_value(input_value)
@provide_session
def clear_not_launched_queued_tasks(self, session: Session = NEW_SESSION) -> None:
"""
Clear tasks that were not yet launched, but were previously queued.
Tasks can end up in a "Queued" state when a rescheduled/deferred operator
comes back up for execution (with the same try_number) before the
pod of its previous incarnation has been fully removed (we think).
It's also possible when an executor abruptly shuts down (leaving a non-empty
task_queue on that executor), but that scenario is handled via normal adoption.
This method checks each of our queued tasks to see if the corresponding pod
is around, and if not, and there's no matching entry in our own
task_queue, marks it for re-execution.
"""
if TYPE_CHECKING:
assert self.kube_client
from airflow.models.taskinstance import TaskInstance
self.log.debug("Clearing tasks that have not been launched")
query = session.query(TaskInstance).filter(
TaskInstance.state == TaskInstanceState.QUEUED, TaskInstance.queued_by_job_id == self.job_id
)
if self.kubernetes_queue:
query = query.filter(TaskInstance.queue == self.kubernetes_queue)
queued_tis: list[TaskInstance] = query.all()
self.log.info("Found %s queued task instances", len(queued_tis))
# Go through the "last seen" dictionary and clean out old entries
allowed_age = self.kube_config.worker_pods_queued_check_interval * 3
for key, timestamp in list(self.last_handled.items()):
if time.time() - timestamp > allowed_age:
del self.last_handled[key]
for ti in queued_tis:
self.log.debug("Checking task instance %s", ti)
# Check to see if we've handled it ourselves recently
if ti.key in self.last_handled:
continue
# Build the pod selector
base_label_selector = (
f"dag_id={self._make_safe_label_value(ti.dag_id)},"
f"task_id={self._make_safe_label_value(ti.task_id)},"
f"airflow-worker={self._make_safe_label_value(str(ti.queued_by_job_id))}"
)
if ti.map_index >= 0:
# Old tasks _couldn't_ be mapped, so we don't have to worry about compat
base_label_selector += f",map_index={ti.map_index}"
kwargs = dict(label_selector=base_label_selector)
if self.kube_config.kube_client_request_args:
kwargs.update(**self.kube_config.kube_client_request_args)
# Try run_id first
kwargs["label_selector"] += ",run_id=" + self._make_safe_label_value(ti.run_id)
pod_list = self._list_pods(kwargs)
if pod_list:
continue
# Fallback to old style of using execution_date
kwargs[
"label_selector"
] = f"{base_label_selector},execution_date={self._make_safe_label_value(ti.execution_date)}"
pod_list = self._list_pods(kwargs)
if pod_list:
continue
self.log.info("TaskInstance: %s found in queued state but was not launched, rescheduling", ti)
session.query(TaskInstance).filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.task_id == ti.task_id,
TaskInstance.run_id == ti.run_id,
TaskInstance.map_index == ti.map_index,
).update({TaskInstance.state: TaskInstanceState.SCHEDULED})
def start(self) -> None:
"""Starts the executor."""
self.log.info("Start Kubernetes executor")
self.scheduler_job_id = str(self.job_id)
self.log.debug("Start with scheduler_job_id: %s", self.scheduler_job_id)
from airflow.executors.kubernetes_executor_utils import AirflowKubernetesScheduler
from airflow.kubernetes.kube_client import get_kube_client
self.kube_client = get_kube_client()
self.kube_scheduler = AirflowKubernetesScheduler(
kube_config=self.kube_config,
result_queue=self.result_queue,
kube_client=self.kube_client,
scheduler_job_id=self.scheduler_job_id,
)
self.event_scheduler = EventScheduler()
self.event_scheduler.call_regular_interval(
self.kube_config.worker_pods_queued_check_interval,
self.clear_not_launched_queued_tasks,
)
# We also call this at startup as that's the most likely time to see
# stuck queued tasks
self.clear_not_launched_queued_tasks()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
"""Executes task asynchronously."""
if TYPE_CHECKING:
assert self.task_queue
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug("Add task %s with command %s, executor_config %s", key, command, executor_config)
else:
self.log.info("Add task %s with command %s", key, command)
from airflow.kubernetes.pod_generator import PodGenerator
try:
kube_executor_config = PodGenerator.from_obj(executor_config)
except Exception:
self.log.error("Invalid executor_config for %s. Executor_config: %s", key, executor_config)
self.fail(key=key, info="Invalid executor_config passed")
return
if executor_config:
pod_template_file = executor_config.get("pod_template_file", None)
else:
pod_template_file = None
self.event_buffer[key] = (TaskInstanceState.QUEUED, self.scheduler_job_id)
self.task_queue.put((key, command, kube_executor_config, pod_template_file))
# We keep a temporary local record that we've handled this so we don't
# try and remove it from the QUEUED state while we process it
self.last_handled[key] = time.time()
def sync(self) -> None:
"""Synchronize task state."""
if TYPE_CHECKING:
assert self.scheduler_job_id
assert self.kube_scheduler
assert self.kube_config
assert self.result_queue
assert self.task_queue
assert self.event_scheduler
if self.running:
self.log.debug("self.running: %s", self.running)
if self.queued_tasks:
self.log.debug("self.queued: %s", self.queued_tasks)
self.kube_scheduler.sync()
last_resource_version: dict[str, str] = defaultdict(lambda: "0")
while True:
try:
results = self.result_queue.get_nowait()
try:
key, state, pod_name, namespace, resource_version = results
last_resource_version[namespace] = resource_version
self.log.info("Changing state of %s to %s", results, state)
try:
self._change_state(key, state, pod_name, namespace)
except Exception as e:
self.log.exception(
"Exception: %s when attempting to change state of %s to %s, re-queueing.",
e,
results,
state,
)
self.result_queue.put(results)
finally:
self.result_queue.task_done()
except Empty:
break
from airflow.executors.kubernetes_executor_utils import ResourceVersion
resource_instance = ResourceVersion()
for ns in resource_instance.resource_version.keys():
resource_instance.resource_version[ns] = (
last_resource_version[ns] or resource_instance.resource_version[ns]
)
from kubernetes.client.rest import ApiException
for _ in range(self.kube_config.worker_pods_creation_batch_size):
try:
task = self.task_queue.get_nowait()
try:
self.kube_scheduler.run_next(task)
except PodReconciliationError as e:
self.log.error(
"Pod reconciliation failed, likely due to kubernetes library upgrade. "
"Try clearing the task to re-run.",
exc_info=True,
)
self.fail(task[0], e)
except ApiException as e:
# These codes indicate something is wrong with pod definition; otherwise we assume pod
# definition is ok, and that retrying may work
if e.status in (400, 422):
self.log.error("Pod creation failed with reason %r. Failing task", e.reason)
key, _, _, _ = task
self.change_state(key, TaskInstanceState.FAILED, e)
else:
self.log.warning(
"ApiException when attempting to run task, re-queueing. Reason: %r. Message: %s",
e.reason,
json.loads(e.body)["message"],
)
self.task_queue.put(task)
except PodMutationHookException as e:
key, _, _, _ = task
self.log.error(
"Pod Mutation Hook failed for the task %s. Failing task. Details: %s",
key,
e.__cause__,
)
self.fail(key, e)
finally:
self.task_queue.task_done()
except Empty:
break
# Run any pending timed events
next_event = self.event_scheduler.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
@provide_session
def _change_state(
self,
key: TaskInstanceKey,
state: str | None,
pod_name: str,
namespace: str,
session: Session = NEW_SESSION,
) -> None:
if TYPE_CHECKING:
assert self.kube_scheduler
if state == State.RUNNING:
self.event_buffer[key] = state, None
return
if self.kube_config.delete_worker_pods:
if state != State.FAILED or self.kube_config.delete_worker_pods_on_failure:
self.kube_scheduler.delete_pod(pod_name=pod_name, namespace=namespace)
self.log.info("Deleted pod: %s in namespace %s", str(key), str(namespace))
else:
self.kube_scheduler.patch_pod_executor_done(pod_name=pod_name, namespace=namespace)
self.log.info("Patched pod %s in namespace %s to mark it as done", str(key), str(namespace))
try:
self.running.remove(key)
except KeyError:
self.log.debug("TI key not in running, not adding to event_buffer: %s", key)
# If we don't have a TI state, look it up from the db. event_buffer expects the TI state
if state is None:
from airflow.models.taskinstance import TaskInstance
state = session.query(TaskInstance.state).filter(TaskInstance.filter_for_tis([key])).scalar()
self.event_buffer[key] = state, None
@staticmethod
def _get_pod_namespace(ti: TaskInstance):
pod_override = ti.executor_config.get("pod_override")
namespace = None
with suppress(Exception):
namespace = pod_override.metadata.namespace
return namespace or conf.get("kubernetes_executor", "namespace", fallback="default")
def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]:
messages = []
log = []
try:
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodGenerator
client = get_kube_client()
messages.append(f"Attempting to fetch logs from pod {ti.hostname} through kube API")
selector = PodGenerator.build_selector_for_k8s_executor_pod(
dag_id=ti.dag_id,
task_id=ti.task_id,
try_number=try_number,
map_index=ti.map_index,
run_id=ti.run_id,
airflow_worker=ti.queued_by_job_id,
)
namespace = self._get_pod_namespace(ti)
pod_list = client.list_namespaced_pod(
namespace=namespace,
label_selector=selector,
).items
if not pod_list:
raise RuntimeError("Cannot find pod for ti %s", ti)
elif len(pod_list) > 1:
raise RuntimeError("Found multiple pods for ti %s: %s", ti, pod_list)
res = client.read_namespaced_pod_log(
name=pod_list[0].metadata.name,
namespace=namespace,
container="base",
follow=False,
tail_lines=100,
_preload_content=False,
)
for line in res:
log.append(remove_escape_codes(line.decode()))
if log:
messages.append("Found logs through kube API")
except Exception as e:
messages.append(f"Reading from k8s pod logs failed: {str(e)}")
return messages, ["\n".join(log)]
def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
# Always flush TIs without queued_by_job_id
tis_to_flush = [ti for ti in tis if not ti.queued_by_job_id]
scheduler_job_ids = {ti.queued_by_job_id for ti in tis}
tis_to_flush_by_key = {ti.key: ti for ti in tis if ti.queued_by_job_id}
kube_client: client.CoreV1Api = self.kube_client
for scheduler_job_id in scheduler_job_ids:
scheduler_job_id = self._make_safe_label_value(str(scheduler_job_id))
# We will look for any pods owned by the no-longer-running scheduler,
# but will exclude only successful pods, as those TIs will have a terminal state
# and not be up for adoption!
# Those workers that failed, however, are okay to adopt here as their TI will
# still be in queued.
query_kwargs = {
"field_selector": "status.phase!=Succeeded",
"label_selector": (
"kubernetes_executor=True,"
f"airflow-worker={scheduler_job_id},{POD_EXECUTOR_DONE_KEY}!=True"
),
}
pod_list = self._list_pods(query_kwargs)
for pod in pod_list:
self.adopt_launched_task(kube_client, pod, tis_to_flush_by_key)
self._adopt_completed_pods(kube_client)
tis_to_flush.extend(tis_to_flush_by_key.values())
return tis_to_flush
def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]:
"""
Handle remnants of tasks that were failed because they were stuck in queued.
Tasks can get stuck in queued. If such a task is detected, it will be marked
as `UP_FOR_RETRY` if the task instance has remaining retries or marked as `FAILED`
if it doesn't.
:param tis: List of Task Instances to clean up
:return: List of readable task instances for a warning message
"""
from airflow.kubernetes.pod_generator import PodGenerator
if TYPE_CHECKING:
assert self.kube_client
assert self.kube_scheduler
readable_tis = []
for ti in tis:
selector = PodGenerator.build_selector_for_k8s_executor_pod(
dag_id=ti.dag_id,
task_id=ti.task_id,
try_number=ti.try_number,
map_index=ti.map_index,
run_id=ti.run_id,
airflow_worker=ti.queued_by_job_id,
)
namespace = self._get_pod_namespace(ti)
pod_list = self.kube_client.list_namespaced_pod(
namespace=namespace,
label_selector=selector,
).items
if not pod_list:
self.log.warning("Cannot find pod for ti %s", ti)
continue
elif len(pod_list) > 1:
self.log.warning("Found multiple pods for ti %s: %s", ti, pod_list)
continue
readable_tis.append(repr(ti))
self.kube_scheduler.delete_pod(pod_name=pod_list[0].metadata.name, namespace=namespace)
return readable_tis
def adopt_launched_task(
self,
kube_client: client.CoreV1Api,
pod: k8s.V1Pod,
tis_to_flush_by_key: dict[TaskInstanceKey, k8s.V1Pod],
) -> None:
"""
Patch existing pod so that the current KubernetesJobWatcher can monitor it via label selectors.
:param kube_client: kubernetes client for speaking to kube API
:param pod: V1Pod spec that we will patch with new label
:param tis_to_flush_by_key: TIs that will be flushed if they aren't adopted
"""
if TYPE_CHECKING:
assert self.scheduler_job_id
self.log.info("attempting to adopt pod %s", pod.metadata.name)
ti_key = annotations_to_key(pod.metadata.annotations)
if ti_key not in tis_to_flush_by_key:
self.log.error("attempting to adopt taskinstance which was not specified by database: %s", ti_key)
return
new_worker_id_label = self._make_safe_label_value(self.scheduler_job_id)
from kubernetes.client.rest import ApiException
try:
kube_client.patch_namespaced_pod(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
body={"metadata": {"labels": {"airflow-worker": new_worker_id_label}}},
)
except ApiException as e:
self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
return
del tis_to_flush_by_key[ti_key]
self.running.add(ti_key)
def _adopt_completed_pods(self, kube_client: client.CoreV1Api) -> None:
"""
Patch completed pods so that the KubernetesJobWatcher can delete them.
:param kube_client: kubernetes client for speaking to kube API
"""
if TYPE_CHECKING:
assert self.scheduler_job_id
new_worker_id_label = self._make_safe_label_value(self.scheduler_job_id)
query_kwargs = {
"field_selector": "status.phase=Succeeded",
"label_selector": (
"kubernetes_executor=True,"
f"airflow-worker!={new_worker_id_label},{POD_EXECUTOR_DONE_KEY}!=True"
),
}
pod_list = self._list_pods(query_kwargs)
for pod in pod_list:
self.log.info("Attempting to adopt pod %s", pod.metadata.name)
from kubernetes.client.rest import ApiException
try:
kube_client.patch_namespaced_pod(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
body={"metadata": {"labels": {"airflow-worker": new_worker_id_label}}},
)
except ApiException as e:
self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
ti_id = annotations_to_key(pod.metadata.annotations)
self.running.add(ti_id)
def _flush_task_queue(self) -> None:
if TYPE_CHECKING:
assert self.task_queue
self.log.debug("Executor shutting down, task_queue approximate size=%d", self.task_queue.qsize())
while True:
try:
task = self.task_queue.get_nowait()
# This is a new task to run thus ok to ignore.
self.log.warning("Executor shutting down, will NOT run task=%s", task)
self.task_queue.task_done()
except Empty:
break
def _flush_result_queue(self) -> None:
if TYPE_CHECKING:
assert self.result_queue
self.log.debug("Executor shutting down, result_queue approximate size=%d", self.result_queue.qsize())
while True:
try:
results = self.result_queue.get_nowait()
self.log.warning("Executor shutting down, flushing results=%s", results)
try:
key, state, pod_name, namespace, resource_version = results
self.log.info(
"Changing state of %s to %s : resource_version=%d", results, state, resource_version
)
try:
self._change_state(key, state, pod_name, namespace)
except Exception as e:
self.log.exception(
"Ignoring exception: %s when attempting to change state of %s to %s.",
e,
results,
state,
)
finally:
self.result_queue.task_done()
except Empty:
break
def end(self) -> None:
"""Called when the executor shuts down."""
if TYPE_CHECKING:
assert self.task_queue
assert self.result_queue
assert self.kube_scheduler
self.log.info("Shutting down Kubernetes executor")
try:
self.log.debug("Flushing task_queue...")
self._flush_task_queue()
self.log.debug("Flushing result_queue...")
self._flush_result_queue()
# Both queues should be empty...
self.task_queue.join()
self.result_queue.join()
except ConnectionResetError:
self.log.exception("Connection Reset error while flushing task_queue and result_queue.")
if self.kube_scheduler:
self.kube_scheduler.terminate()
self._manager.shutdown()
def terminate(self):
"""Terminate the executor is not doing anything."""
| 26,997 | 41.516535 | 110 |
py
|
airflow
|
airflow-main/airflow/executors/debug_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DebugExecutor.
.. seealso::
For more information on how the DebugExecutor works, take a look at the guide:
:ref:`executor:DebugExecutor`
"""
from __future__ import annotations
import threading
import time
from typing import TYPE_CHECKING, Any
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskinstancekey import TaskInstanceKey
class DebugExecutor(BaseExecutor):
"""
This executor is meant for debugging purposes. It can be used with SQLite.
    It executes one task instance at a time. Additionally, to support working
    with sensors, all sensors' ``mode`` will be automatically set to "reschedule".
"""
_terminated = threading.Event()
is_single_threaded: bool = True
is_production: bool = False
change_sensor_mode_to_reschedule: bool = True
def __init__(self):
super().__init__()
self.tasks_to_run: list[TaskInstance] = []
# Place where we keep information for task instance raw run
self.tasks_params: dict[TaskInstanceKey, dict[str, Any]] = {}
from airflow.configuration import conf
self.fail_fast = conf.getboolean("debug", "fail_fast")
def execute_async(self, *args, **kwargs) -> None:
"""The method is replaced by custom trigger_task implementation."""
def sync(self) -> None:
task_succeeded = True
while self.tasks_to_run:
ti = self.tasks_to_run.pop(0)
if self.fail_fast and not task_succeeded:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
continue
if self._terminated.is_set():
self.log.info("Executor is terminated! Stopping %s to %s", ti.key, State.FAILED)
ti.set_state(State.FAILED)
self.change_state(ti.key, State.FAILED)
continue
task_succeeded = self._run_task(ti)
def _run_task(self, ti: TaskInstance) -> bool:
self.log.debug("Executing task: %s", ti)
key = ti.key
try:
params = self.tasks_params.pop(ti.key, {})
ti.run(job_id=ti.job_id, **params)
self.change_state(key, State.SUCCESS)
return True
except Exception as e:
ti.set_state(State.FAILED)
self.change_state(key, State.FAILED)
self.log.exception("Failed to execute task: %s.", str(e))
return False
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: int | None = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: str | None = None,
cfg_path: str | None = None,
) -> None:
"""Queues task instance with empty command because we do not need it."""
self.queue_command(
task_instance,
[str(task_instance)], # Just for better logging, it's not used anywhere
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
# Save params for TaskInstance._run_raw_task
self.tasks_params[task_instance.key] = {
"mark_success": mark_success,
"pool": pool,
}
def trigger_tasks(self, open_slots: int) -> None:
"""
Triggers tasks.
        Instead of calling execute_async we just add the task instance to the tasks_to_run queue.
:param open_slots: Number of open slots
"""
if not self.queued_tasks:
# wait a bit if there are no tasks ready to be executed to avoid spinning too fast in the void
time.sleep(0.5)
return
sorted_queue = sorted(
self.queued_tasks.items(),
key=lambda x: x[1][1],
reverse=True,
)
        for _ in range(min(open_slots, len(self.queued_tasks))):
key, (_, _, _, ti) = sorted_queue.pop(0)
self.queued_tasks.pop(key)
self.running.add(key)
self.tasks_to_run.append(ti) # type: ignore
def end(self) -> None:
"""Set states of queued tasks to UPSTREAM_FAILED marking them as not executed."""
for ti in self.tasks_to_run:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
def terminate(self) -> None:
self._terminated.set()
def change_state(self, key: TaskInstanceKey, state: str, info=None) -> None:
self.log.debug("Popping %s from executor task queue.", key)
self.running.remove(key)
self.event_buffer[key] = state, info
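if __name__ == "__main__":
    # A small sketch of the priority ordering used by trigger_tasks above: queued_tasks
    # maps a task-instance key to a (command, priority, queue, task_instance) tuple and
    # higher priority values are popped first. The keys and tuples are made-up examples.
    queued = {
        "ti_low": (["airflow", "tasks", "run"], 1, None, None),
        "ti_high": (["airflow", "tasks", "run"], 5, None, None),
    }
    ordered = sorted(queued.items(), key=lambda x: x[1][1], reverse=True)
    assert [key for key, _ in ordered] == ["ti_high", "ti_low"]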
| 5,895 | 35.395062 | 106 |
py
|
airflow
|
airflow-main/airflow/executors/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executors."""
from __future__ import annotations
from airflow.utils.deprecation_tools import add_deprecated_classes
__deprecated_classes = {
"celery_executor": {
"app": "airflow.providers.celery.executors.celery_executor_utils.app",
"CeleryExecutor": "airflow.providers.celery.executors.celery_executor.CeleryExecutor",
},
"celery_kubernetes_executor": {
"CeleryKubernetesExecutor": "airflow.providers.celery.executors."
"celery_kubernetes_executor.CeleryKubernetesExecutor",
},
}
add_deprecated_classes(__deprecated_classes, __name__)
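if __name__ == "__main__":
    # A sketch of what the mapping above provides: the legacy import path still resolves,
    # but emits a deprecation warning and forwards to the provider package. This assumes
    # the Celery provider is installed.
    import warnings
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from airflow.executors.celery_executor import CeleryExecutor  # noqa: F401
    print([str(w.message) for w in caught])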
| 1,375 | 39.470588 | 94 |
py
|
airflow
|
airflow-main/airflow/executors/local_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
LocalExecutor.
.. seealso::
For more information on how the LocalExecutor works, take a look at the guide:
:ref:`executor:LocalExecutor`
"""
from __future__ import annotations
import logging
import os
import subprocess
from abc import abstractmethod
from multiprocessing import Manager, Process
from multiprocessing.managers import SyncManager
from queue import Empty, Queue
from typing import TYPE_CHECKING, Any, Optional, Tuple
from setproctitle import getproctitle, setproctitle
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import PARALLELISM, BaseExecutor
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import TaskInstanceState
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
from airflow.models.taskinstance import TaskInstanceStateType
from airflow.models.taskinstancekey import TaskInstanceKey
# This is a work to be executed by a worker.
# It can be a (Key, Command) pair, but it can also be (None, None), which acts as a
# "Poison Pill": a worker that receives the pill should shut down immediately.
ExecutorWorkType = Tuple[Optional[TaskInstanceKey], Optional[CommandType]]
class LocalWorkerBase(Process, LoggingMixin):
"""
LocalWorkerBase implementation to run airflow commands.
Executes the given command and puts the result into a result queue when done, terminating execution.
:param result_queue: the queue to store result state
"""
def __init__(self, result_queue: Queue[TaskInstanceStateType]):
super().__init__(target=self.do_work)
self.daemon: bool = True
self.result_queue: Queue[TaskInstanceStateType] = result_queue
def run(self):
# We know we've just started a new process, so lets disconnect from the metadata db now
settings.engine.pool.dispose()
settings.engine.dispose()
setproctitle("airflow worker -- LocalExecutor")
return super().run()
def execute_work(self, key: TaskInstanceKey, command: CommandType) -> None:
"""
Executes command received and stores result state in queue.
:param key: the key to identify the task instance
:param command: the command to execute
"""
if key is None:
return
self.log.info("%s running %s", self.__class__.__name__, command)
setproctitle(f"airflow worker -- LocalExecutor: {command}")
if settings.EXECUTE_TASKS_NEW_PYTHON_INTERPRETER:
state = self._execute_work_in_subprocess(command)
else:
state = self._execute_work_in_fork(command)
self.result_queue.put((key, state))
# Remove the command since the worker is done executing the task
setproctitle("airflow worker -- LocalExecutor")
def _execute_work_in_subprocess(self, command: CommandType) -> TaskInstanceState:
try:
subprocess.check_call(command, close_fds=True)
return TaskInstanceState.SUCCESS
except subprocess.CalledProcessError as e:
self.log.error("Failed to execute task %s.", str(e))
return TaskInstanceState.FAILED
def _execute_work_in_fork(self, command: CommandType) -> TaskInstanceState:
pid = os.fork()
if pid:
# In parent, wait for the child
pid, ret = os.waitpid(pid, 0)
return TaskInstanceState.SUCCESS if ret == 0 else TaskInstanceState.FAILED
from airflow.sentry import Sentry
ret = 1
try:
import signal
from airflow.cli.cli_parser import get_parser
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGUSR2, signal.SIG_DFL)
parser = get_parser()
# [1:] - remove "airflow" from the start of the command
args = parser.parse_args(command[1:])
args.shut_down_logging = False
setproctitle(f"airflow task supervisor: {command}")
args.func(args)
ret = 0
return TaskInstanceState.SUCCESS
except Exception as e:
self.log.exception("Failed to execute task %s.", e)
return TaskInstanceState.FAILED
finally:
Sentry.flush()
logging.shutdown()
os._exit(ret)
@abstractmethod
def do_work(self):
"""Called in the subprocess and should then execute tasks."""
raise NotImplementedError()
class LocalWorker(LocalWorkerBase):
"""
Local worker that executes the task.
:param result_queue: queue where results of the tasks are put.
:param key: key identifying task instance
:param command: Command to execute
"""
def __init__(
self, result_queue: Queue[TaskInstanceStateType], key: TaskInstanceKey, command: CommandType
):
super().__init__(result_queue)
self.key: TaskInstanceKey = key
self.command: CommandType = command
def do_work(self) -> None:
self.execute_work(key=self.key, command=self.command)
class QueuedLocalWorker(LocalWorkerBase):
"""
LocalWorker implementation that is waiting for tasks from a queue.
Will continue executing commands as they become available in the queue.
It will terminate execution once the poison token is found.
:param task_queue: queue from which worker reads tasks
:param result_queue: queue where worker puts results after finishing tasks
"""
def __init__(self, task_queue: Queue[ExecutorWorkType], result_queue: Queue[TaskInstanceStateType]):
super().__init__(result_queue=result_queue)
self.task_queue = task_queue
def do_work(self) -> None:
while True:
try:
key, command = self.task_queue.get()
except EOFError:
self.log.info(
"Failed to read tasks from the task queue because the other "
"end has closed the connection. Terminating worker %s.",
self.name,
)
break
try:
if key is None or command is None:
# Received poison pill, no more tasks to run
break
self.execute_work(key=key, command=command)
finally:
self.task_queue.task_done()
class LocalExecutor(BaseExecutor):
"""
LocalExecutor executes tasks locally in parallel.
It uses the multiprocessing Python library and queues to parallelize the execution of tasks.
:param parallelism: how many parallel processes are run in the executor
"""
is_local: bool = True
supports_pickling: bool = False
serve_logs: bool = True
def __init__(self, parallelism: int = PARALLELISM):
super().__init__(parallelism=parallelism)
if self.parallelism < 0:
raise AirflowException("parallelism must be bigger than or equal to 0")
self.manager: SyncManager | None = None
self.result_queue: Queue[TaskInstanceStateType] | None = None
self.workers: list[QueuedLocalWorker] = []
self.workers_used: int = 0
self.workers_active: int = 0
self.impl: None | (LocalExecutor.UnlimitedParallelism | LocalExecutor.LimitedParallelism) = None
class UnlimitedParallelism:
"""
Implement LocalExecutor with unlimited parallelism, starting one process per command executed.
:param executor: the executor instance to implement.
"""
def __init__(self, executor: LocalExecutor):
self.executor: LocalExecutor = executor
def start(self) -> None:
"""Starts the executor."""
self.executor.workers_used = 0
self.executor.workers_active = 0
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
"""
Executes task asynchronously.
:param key: the key to identify the task instance
:param command: the command to execute
:param queue: Name of the queue
:param executor_config: configuration for the executor
"""
if TYPE_CHECKING:
assert self.executor.result_queue
local_worker = LocalWorker(self.executor.result_queue, key=key, command=command)
self.executor.workers_used += 1
self.executor.workers_active += 1
local_worker.start()
def sync(self) -> None:
"""Sync will get called periodically by the heartbeat method."""
if not self.executor.result_queue:
raise AirflowException("Executor should be started first")
while not self.executor.result_queue.empty():
results = self.executor.result_queue.get()
self.executor.change_state(*results)
self.executor.workers_active -= 1
def end(self) -> None:
"""Wait synchronously for the previously submitted job to complete."""
while self.executor.workers_active > 0:
self.executor.sync()
class LimitedParallelism:
"""
Implements LocalExecutor with limited parallelism.
Uses a task queue to coordinate work distribution.
:param executor: the executor instance to implement.
"""
def __init__(self, executor: LocalExecutor):
self.executor: LocalExecutor = executor
self.queue: Queue[ExecutorWorkType] | None = None
def start(self) -> None:
"""Starts limited parallelism implementation."""
if TYPE_CHECKING:
assert self.executor.manager
assert self.executor.result_queue
self.queue = self.executor.manager.Queue()
self.executor.workers = [
QueuedLocalWorker(self.queue, self.executor.result_queue)
for _ in range(self.executor.parallelism)
]
self.executor.workers_used = len(self.executor.workers)
for worker in self.executor.workers:
worker.start()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
"""
Executes task asynchronously.
:param key: the key to identify the task instance
:param command: the command to execute
:param queue: name of the queue
:param executor_config: configuration for the executor
"""
if TYPE_CHECKING:
assert self.queue
self.queue.put((key, command))
def sync(self):
"""Sync will get called periodically by the heartbeat method."""
while True:
try:
results = self.executor.result_queue.get_nowait()
try:
self.executor.change_state(*results)
finally:
self.executor.result_queue.task_done()
except Empty:
break
def end(self):
"""Ends the executor. Sends the poison pill to all workers."""
for _ in self.executor.workers:
self.queue.put((None, None))
# Wait for commands to finish
self.queue.join()
self.executor.sync()
def start(self) -> None:
"""Starts the executor."""
old_proctitle = getproctitle()
setproctitle("airflow executor -- LocalExecutor")
self.manager = Manager()
setproctitle(old_proctitle)
self.result_queue = self.manager.Queue()
self.workers = []
self.workers_used = 0
self.workers_active = 0
self.impl = (
LocalExecutor.UnlimitedParallelism(self)
if self.parallelism == 0
else LocalExecutor.LimitedParallelism(self)
)
self.impl.start()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None:
"""Execute asynchronously."""
if TYPE_CHECKING:
assert self.impl
self.validate_airflow_tasks_run_command(command)
self.impl.execute_async(key=key, command=command, queue=queue, executor_config=executor_config)
def sync(self) -> None:
"""Sync will get called periodically by the heartbeat method."""
if TYPE_CHECKING:
assert self.impl
self.impl.sync()
def end(self) -> None:
"""Ends the executor."""
if TYPE_CHECKING:
assert self.impl
assert self.manager
self.log.info(
"Shutting down LocalExecutor"
"; waiting for running tasks to finish. Signal again if you don't want to wait."
)
self.impl.end()
self.manager.shutdown()
def terminate(self):
"""Terminate the executor is not doing anything."""
| 14,239 | 34.073892 | 104 |
py
|
airflow
|
airflow-main/airflow/executors/base_executor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base executor - this is the base class for all the implemented executors."""
from __future__ import annotations
import logging
import sys
import warnings
from collections import OrderedDict, defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Tuple
import pendulum
from airflow.configuration import conf
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.stats import Stats
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
PARALLELISM: int = conf.getint("core", "PARALLELISM")
if TYPE_CHECKING:
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskinstancekey import TaskInstanceKey
# Command to execute - list of strings
# the first element is always "airflow".
# It should be result of TaskInstance.generate_command method.
CommandType = List[str]
# Task that is queued. It contains all the information that is
# needed to run the task.
#
# Tuple of: command, priority, queue name, TaskInstance
QueuedTaskInstanceType = Tuple[CommandType, int, Optional[str], TaskInstance]
# Event_buffer dict value type
# Tuple of: state, info
EventBufferValueType = Tuple[Optional[str], Any]
# Task tuple to send to be executed
TaskTuple = Tuple[TaskInstanceKey, CommandType, Optional[str], Optional[Any]]
log = logging.getLogger(__name__)
@dataclass
class RunningRetryAttemptType:
"""
    Keep track of attempts to re-queue a task that still appears to be running.
We don't want to slow down the loop, so we don't block, but we allow it to be
re-checked for at least MIN_SECONDS seconds.
"""
MIN_SECONDS = 10
total_tries: int = field(default=0, init=False)
tries_after_min: int = field(default=0, init=False)
first_attempt_time: datetime = field(default_factory=lambda: pendulum.now("UTC"), init=False)
@property
def elapsed(self):
"""Seconds since first attempt."""
return (pendulum.now("UTC") - self.first_attempt_time).total_seconds()
def can_try_again(self):
"""Return False if there has been at least one try greater than MIN_SECONDS, otherwise return True."""
if self.tries_after_min > 0:
return False
self.total_tries += 1
elapsed = self.elapsed
if elapsed > self.MIN_SECONDS:
self.tries_after_min += 1
log.debug("elapsed=%s tries=%s", elapsed, self.total_tries)
return True
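if __name__ == "__main__":
    # A sketch of the bookkeeping above: re-queue attempts are unlimited while fewer than
    # MIN_SECONDS have elapsed since the first attempt; after that, exactly one further
    # attempt is allowed before can_try_again() starts returning False. Rewinding
    # first_attempt_time below is purely for illustration.
    attempt = RunningRetryAttemptType()
    assert attempt.can_try_again()  # still inside the MIN_SECONDS window
    attempt.first_attempt_time = pendulum.now("UTC").subtract(seconds=attempt.MIN_SECONDS + 1)
    assert attempt.can_try_again()  # one final attempt after the window has passed
    assert not attempt.can_try_again()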
class BaseExecutor(LoggingMixin):
"""
Base class to inherit for concrete executors such as Celery, Kubernetes, Local, Sequential, etc.
:param parallelism: how many jobs should run at one time. Set to ``0`` for infinity.
"""
supports_ad_hoc_ti_run: bool = False
supports_pickling: bool = True
supports_sentry: bool = False
is_local: bool = False
is_single_threaded: bool = False
is_production: bool = True
change_sensor_mode_to_reschedule: bool = False
serve_logs: bool = False
job_id: None | int | str = None
callback_sink: BaseCallbackSink | None = None
def __init__(self, parallelism: int = PARALLELISM):
super().__init__()
self.parallelism: int = parallelism
self.queued_tasks: OrderedDict[TaskInstanceKey, QueuedTaskInstanceType] = OrderedDict()
self.running: set[TaskInstanceKey] = set()
self.event_buffer: dict[TaskInstanceKey, EventBufferValueType] = {}
self.attempts: dict[TaskInstanceKey, RunningRetryAttemptType] = defaultdict(RunningRetryAttemptType)
def __repr__(self):
return f"{self.__class__.__name__}(parallelism={self.parallelism})"
def start(self): # pragma: no cover
"""Executors may need to get things started."""
def queue_command(
self,
task_instance: TaskInstance,
command: CommandType,
priority: int = 1,
queue: str | None = None,
):
"""Queues command to task."""
if task_instance.key not in self.queued_tasks:
self.log.info("Adding to queue: %s", command)
self.queued_tasks[task_instance.key] = (command, priority, queue, task_instance)
else:
self.log.error("could not queue task %s", task_instance.key)
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: int | None = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: str | None = None,
cfg_path: str | None = None,
) -> None:
"""Queues task instance."""
pool = pool or task_instance.pool
# TODO (edgarRd): AIRFLOW-1985:
# cfg_path is needed to propagate the config values if using impersonation
# (run_as_user), given that there are different code paths running tasks.
# For a long term solution we need to address AIRFLOW-1986
command_list_to_run = task_instance.command_as_list(
local=True,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
pool=pool,
pickle_id=pickle_id,
cfg_path=cfg_path,
)
self.log.debug("created command %s", command_list_to_run)
self.queue_command(
task_instance,
command_list_to_run,
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
def has_task(self, task_instance: TaskInstance) -> bool:
"""
Checks if a task is either queued or running in this executor.
:param task_instance: TaskInstance
:return: True if the task is known to this executor
"""
return task_instance.key in self.queued_tasks or task_instance.key in self.running
def sync(self) -> None:
"""
Sync will get called periodically by the heartbeat method.
        Executors should override this to gather task statuses.
"""
def heartbeat(self) -> None:
"""Heartbeat sent to trigger new jobs."""
if not self.parallelism:
open_slots = len(self.queued_tasks)
else:
open_slots = self.parallelism - len(self.running)
num_running_tasks = len(self.running)
num_queued_tasks = len(self.queued_tasks)
self.log.debug("%s running task instances", num_running_tasks)
self.log.debug("%s in queue", num_queued_tasks)
self.log.debug("%s open slots", open_slots)
Stats.gauge(
"executor.open_slots", value=open_slots, tags={"status": "open", "name": self.__class__.__name__}
)
Stats.gauge(
"executor.queued_tasks",
value=num_queued_tasks,
tags={"status": "queued", "name": self.__class__.__name__},
)
Stats.gauge(
"executor.running_tasks",
value=num_running_tasks,
tags={"status": "running", "name": self.__class__.__name__},
)
self.trigger_tasks(open_slots)
# Calling child class sync method
self.log.debug("Calling the %s sync method", self.__class__)
self.sync()
def order_queued_tasks_by_priority(self) -> list[tuple[TaskInstanceKey, QueuedTaskInstanceType]]:
"""
Orders the queued tasks by priority.
:return: List of tuples from the queued_tasks according to the priority.
"""
return sorted(
self.queued_tasks.items(),
key=lambda x: x[1][1],
reverse=True,
)
def trigger_tasks(self, open_slots: int) -> None:
"""
Initiates async execution of the queued tasks, up to the number of available slots.
:param open_slots: Number of open slots
"""
sorted_queue = self.order_queued_tasks_by_priority()
task_tuples = []
        for _ in range(min(open_slots, len(self.queued_tasks))):
key, (command, _, queue, ti) = sorted_queue.pop(0)
# If a task makes it here but is still understood by the executor
# to be running, it generally means that the task has been killed
# externally and not yet been marked as failed.
#
# However, when a task is deferred, there is also a possibility of
# a race condition where a task might be scheduled again during
# trigger processing, even before we are able to register that the
# deferred task has completed. In this case and for this reason,
# we make a small number of attempts to see if the task has been
# removed from the running set in the meantime.
if key in self.running:
attempt = self.attempts[key]
if attempt.can_try_again():
# if it hasn't been much time since first check, let it be checked again next time
self.log.info("queued but still running; attempt=%s task=%s", attempt.total_tries, key)
continue
# Otherwise, we give up and remove the task from the queue.
self.log.error(
"could not queue task %s (still running after %d attempts)", key, attempt.total_tries
)
del self.attempts[key]
del self.queued_tasks[key]
else:
if key in self.attempts:
del self.attempts[key]
task_tuples.append((key, command, queue, ti.executor_config))
if task_tuples:
self._process_tasks(task_tuples)
def _process_tasks(self, task_tuples: list[TaskTuple]) -> None:
for key, command, queue, executor_config in task_tuples:
del self.queued_tasks[key]
self.execute_async(key=key, command=command, queue=queue, executor_config=executor_config)
self.running.add(key)
def change_state(self, key: TaskInstanceKey, state: str, info=None) -> None:
"""
Changes state of the task.
:param info: Executor information for the task instance
:param key: Unique key for the task instance
:param state: State to set for the task.
"""
self.log.debug("Changing state: %s", key)
try:
self.running.remove(key)
except KeyError:
self.log.debug("Could not find key: %s", str(key))
self.event_buffer[key] = state, info
def fail(self, key: TaskInstanceKey, info=None) -> None:
"""
Set fail state for the event.
:param info: Executor information for the task instance
:param key: Unique key for the task instance
"""
self.change_state(key, State.FAILED, info)
def success(self, key: TaskInstanceKey, info=None) -> None:
"""
Set success state for the event.
:param info: Executor information for the task instance
:param key: Unique key for the task instance
"""
self.change_state(key, State.SUCCESS, info)
def get_event_buffer(self, dag_ids=None) -> dict[TaskInstanceKey, EventBufferValueType]:
"""
Return and flush the event buffer.
In case dag_ids is specified it will only return and flush events
for the given dag_ids. Otherwise, it returns and flushes all events.
:param dag_ids: the dag_ids to return events for; returns all if given ``None``.
:return: a dict of events
"""
cleared_events: dict[TaskInstanceKey, EventBufferValueType] = {}
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = {}
else:
for ti_key in list(self.event_buffer.keys()):
if ti_key.dag_id in dag_ids:
cleared_events[ti_key] = self.event_buffer.pop(ti_key)
return cleared_events
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: str | None = None,
executor_config: Any | None = None,
) -> None: # pragma: no cover
"""
This method will execute the command asynchronously.
:param key: Unique key for the task instance
:param command: Command to run
:param queue: name of the queue
:param executor_config: Configuration passed to the executor.
"""
raise NotImplementedError()
def get_task_log(self, ti: TaskInstance, try_number: int) -> tuple[list[str], list[str]]:
"""
This method can be implemented by any child class to return the task logs.
:param ti: A TaskInstance object
:param try_number: current try_number to read log from
:return: tuple of logs and messages
"""
return [], []
def end(self) -> None: # pragma: no cover
"""Wait synchronously for the previously submitted job to complete."""
raise NotImplementedError()
def terminate(self):
"""This method is called when the daemon receives a SIGTERM."""
raise NotImplementedError()
def cleanup_stuck_queued_tasks(self, tis: list[TaskInstance]) -> list[str]: # pragma: no cover
"""
Handle remnants of tasks that were failed because they were stuck in queued.
Tasks can get stuck in queued. If such a task is detected, it will be marked
as `UP_FOR_RETRY` if the task instance has remaining retries or marked as `FAILED`
if it doesn't.
:param tis: List of Task Instances to clean up
:return: List of readable task instances for a warning message
"""
raise NotImplementedError()
def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[TaskInstance]:
"""
Try to adopt running task instances that have been abandoned by a SchedulerJob dying.
Anything that is not adopted will be cleared by the scheduler (and then become eligible for
re-scheduling)
:return: any TaskInstances that were unable to be adopted
"""
# By default, assume Executors cannot adopt tasks, so just say we failed to adopt anything.
# Subclasses can do better!
return tis
@property
def slots_available(self):
"""Number of new tasks this executor instance can accept."""
if self.parallelism:
return self.parallelism - len(self.running) - len(self.queued_tasks)
else:
return sys.maxsize
@staticmethod
def validate_command(command: list[str]) -> None:
"""
        Back-compat method to check if the command to execute is an airflow command.
:param command: command to check
"""
warnings.warn(
"""
The `validate_command` method is deprecated. Please use ``validate_airflow_tasks_run_command``
""",
RemovedInAirflow3Warning,
stacklevel=2,
)
BaseExecutor.validate_airflow_tasks_run_command(command)
@staticmethod
def validate_airflow_tasks_run_command(command: list[str]) -> tuple[str | None, str | None]:
"""
        Check if the command to execute is an airflow command.
        Returns tuple (dag_id, task_id) retrieved from the command (replaced with None values if missing).
"""
if command[0:3] != ["airflow", "tasks", "run"]:
raise ValueError('The command must start with ["airflow", "tasks", "run"].')
if len(command) > 3 and "--help" not in command:
dag_id: str | None = None
task_id: str | None = None
for arg in command[3:]:
if not arg.startswith("--"):
if dag_id is None:
dag_id = arg
else:
task_id = arg
break
return dag_id, task_id
return None, None
def debug_dump(self):
"""Called in response to SIGUSR2 by the scheduler."""
self.log.info(
"executor.queued (%d)\n\t%s",
len(self.queued_tasks),
"\n\t".join(map(repr, self.queued_tasks.items())),
)
self.log.info("executor.running (%d)\n\t%s", len(self.running), "\n\t".join(map(repr, self.running)))
self.log.info(
"executor.event_buffer (%d)\n\t%s",
len(self.event_buffer),
"\n\t".join(map(repr, self.event_buffer.items())),
)
def send_callback(self, request: CallbackRequest) -> None:
"""Sends callback for execution.
Provides a default implementation which sends the callback to the `callback_sink` object.
:param request: Callback request to be executed.
"""
if not self.callback_sink:
raise ValueError("Callback sink is not ready.")
self.callback_sink.send(request)
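if __name__ == "__main__":
    # A quick sketch of the command shape validate_airflow_tasks_run_command expects; the
    # dag id, task id and run id are made-up examples.
    command = ["airflow", "tasks", "run", "example_dag", "example_task", "manual__2023-01-01"]
    dag_id, task_id = BaseExecutor.validate_airflow_tasks_run_command(command)
    assert (dag_id, task_id) == ("example_dag", "example_task")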
| 18,318 | 37.006224 | 110 |
py
|
airflow
|
airflow-main/airflow/notifications/basenotifier.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING, Sequence
import jinja2
from airflow.template.templater import Templater
from airflow.utils.context import Context, context_merge
if TYPE_CHECKING:
from airflow import DAG
class BaseNotifier(Templater):
"""BaseNotifier class for sending notifications."""
template_fields: Sequence[str] = ()
template_ext: Sequence[str] = ()
def __init__(self):
super().__init__()
self.resolve_template_files()
def _update_context(self, context: Context) -> Context:
"""
        Add the notifier's template fields to the context.
:param context: The airflow context
"""
additional_context = ((f, getattr(self, f)) for f in self.template_fields)
context_merge(context, additional_context)
return context
def _render(self, template, context, dag: DAG | None = None):
dag = dag or context["dag"]
return super()._render(template, context, dag)
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
"""Template all attributes listed in *self.template_fields*.
This mutates the attributes in-place and is irreversible.
:param context: Context dict with values to apply on content.
:param jinja_env: Jinja environment to use for rendering.
"""
dag = context["dag"]
if not jinja_env:
jinja_env = self.get_template_env(dag=dag)
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
@abstractmethod
def notify(self, context: Context) -> None:
"""
Sends a notification.
:param context: The airflow context
"""
...
def __call__(self, *args) -> None:
"""
Send a notification.
:param context: The airflow context
"""
# Currently, there are two ways a callback is invoked
# 1. callback(context) - for on_*_callbacks
# 2. callback(dag, task_list, blocking_task_list, slas, blocking_tis) - for sla_miss_callback
# we have to distinguish between the two calls so that we can prepare the correct context,
if len(args) == 1:
context = args[0]
else:
context = {
"dag": args[0],
"task_list": args[1],
"blocking_task_list": args[2],
"slas": args[3],
"blocking_tis": args[4],
}
self._update_context(context)
self.render_template_fields(context)
try:
self.notify(context)
except Exception as e:
self.log.exception("Failed to send notification: %s", e)
| 3,607 | 32.407407 | 101 |
py
|
airflow
|
airflow-main/airflow/notifications/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/lineage/entities.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines base entities used for providing lineage information."""
from __future__ import annotations
from typing import Any, ClassVar
import attr
@attr.s(auto_attribs=True)
class File:
"""File entity. Refers to a file."""
template_fields: ClassVar = ("url",)
url: str = attr.ib()
type_hint: str | None = None
@attr.s(auto_attribs=True, kw_only=True)
class User:
"""User entity. Identifies a user."""
email: str = attr.ib()
first_name: str | None = None
last_name: str | None = None
template_fields: ClassVar = ("email", "first_name", "last_name")
@attr.s(auto_attribs=True, kw_only=True)
class Tag:
"""Tag or classification entity."""
tag_name: str = attr.ib()
template_fields: ClassVar = ("tag_name",)
@attr.s(auto_attribs=True, kw_only=True)
class Column:
"""Column of a Table."""
name: str = attr.ib()
description: str | None = None
data_type: str = attr.ib()
tags: list[Tag] = []
template_fields: ClassVar = ("name", "description", "data_type", "tags")
# this is a temporary hack to satisfy mypy. Once
# https://github.com/python/mypy/issues/6136 is resolved, use
# `attr.converters.default_if_none(default=False)`
def default_if_none(arg: bool | None) -> bool:
"""Get default value when None."""
return arg or False
@attr.s(auto_attribs=True, kw_only=True)
class Table:
"""Table entity."""
database: str = attr.ib()
cluster: str = attr.ib()
name: str = attr.ib()
tags: list[Tag] = []
description: str | None = None
columns: list[Column] = []
owners: list[User] = []
extra: dict[str, Any] = {}
type_hint: str | None = None
template_fields: ClassVar = (
"database",
"cluster",
"name",
"tags",
"description",
"columns",
"owners",
"extra",
)
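if __name__ == "__main__":
    # A small sketch of building lineage entities for use as operator inlets/outlets;
    # the URL, table coordinates and e-mail address are made-up examples.
    raw_file = File(url="s3://example-bucket/raw/orders.csv")
    orders = Table(
        database="analytics",
        cluster="primary",
        name="orders",
        owners=[User(email="data-team@example.com")],
    )
    print(attr.asdict(orders), raw_file.url)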
| 2,650 | 24.990196 | 76 |
py
|
airflow
|
airflow-main/airflow/lineage/backend.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sends lineage metadata to a backend."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from airflow.models.baseoperator import BaseOperator
class LineageBackend:
"""Sends lineage metadata to a backend."""
def send_lineage(
self,
operator: BaseOperator,
inlets: list | None = None,
outlets: list | None = None,
context: dict | None = None,
):
"""
Sends lineage metadata to a backend.
:param operator: the operator executing a transformation on the inlets and outlets
:param inlets: the inlets to this operator
:param outlets: the outlets from this operator
:param context: the current context of the task instance
"""
raise NotImplementedError()
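if __name__ == "__main__":
    # A minimal sketch of a custom backend, assuming only the interface above; the
    # logging-based delivery is illustrative. A backend like this is wired in through
    # Airflow's lineage backend configuration.
    import logging
    class LoggingLineageBackend(LineageBackend):
        def send_lineage(self, operator, inlets=None, outlets=None, context=None):
            logging.getLogger(__name__).info(
                "lineage for %s: inlets=%s outlets=%s", operator.task_id, inlets, outlets
            )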
| 1,600 | 33.804348 | 90 |
py
|
airflow
|
airflow-main/airflow/lineage/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Provides lineage support functions."""
from __future__ import annotations
import itertools
import logging
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast
from airflow.configuration import conf
from airflow.lineage.backend import LineageBackend
from airflow.utils.session import create_session
if TYPE_CHECKING:
from airflow.utils.context import Context
PIPELINE_OUTLETS = "pipeline_outlets"
PIPELINE_INLETS = "pipeline_inlets"
AUTO = "auto"
log = logging.getLogger(__name__)
def get_backend() -> LineageBackend | None:
"""Gets the lineage backend if defined in the configs."""
clazz = conf.getimport("lineage", "backend", fallback=None)
if clazz:
if not issubclass(clazz, LineageBackend):
raise TypeError(
f"Your custom Lineage class `{clazz.__name__}` "
f"is not a subclass of `{LineageBackend.__name__}`."
)
else:
return clazz()
return None
def _render_object(obj: Any, context: Context) -> dict:
return context["ti"].task.render_template(obj, context)
T = TypeVar("T", bound=Callable)
def apply_lineage(func: T) -> T:
"""
Conditionally send lineage to the backend.
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
_backend = get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Lineage called with inlets: %s, outlets: %s", self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = list(self.outlets)
inlets = list(self.inlets)
if outlets:
self.xcom_push(
context, key=PIPELINE_OUTLETS, value=outlets, execution_date=context["ti"].execution_date
)
if inlets:
self.xcom_push(
context, key=PIPELINE_INLETS, value=inlets, execution_date=context["ti"].execution_date
)
if _backend:
_backend.send_lineage(operator=self, inlets=self.inlets, outlets=self.outlets, context=context)
return ret_val
return cast(T, wrapper)
def prepare_lineage(func: T) -> T:
"""
Prepares the lineage inlets and outlets.
Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of data
"""
@wraps(func)
def wrapper(self, context, *args, **kwargs):
from airflow.models.abstractoperator import AbstractOperator
self.log.debug("Preparing lineage inlets and outlets")
if isinstance(self.inlets, (str, AbstractOperator)):
self.inlets = [self.inlets]
if self.inlets and isinstance(self.inlets, list):
# get task_ids that are specified as parameter and make sure they are upstream
task_ids = (
{o for o in self.inlets if isinstance(o, str)}
.union(op.task_id for op in self.inlets if isinstance(op, AbstractOperator))
.intersection(self.get_flat_relative_ids(upstream=True))
)
# pick up unique direct upstream task_ids if AUTO is specified
if AUTO.upper() in self.inlets or AUTO.lower() in self.inlets:
task_ids = task_ids.union(task_ids.symmetric_difference(self.upstream_task_ids))
# Remove auto and task_ids
self.inlets = [i for i in self.inlets if not isinstance(i, str)]
# We manually create a session here since xcom_pull returns a LazyXComAccess iterator.
# If we do not pass a session a new session will be created, however that session will not be
# properly closed and will remain open. After we are done iterating we can safely close this
# session.
with create_session() as session:
_inlets = self.xcom_pull(
context, task_ids=task_ids, dag_id=self.dag_id, key=PIPELINE_OUTLETS, session=session
)
self.inlets.extend(i for i in itertools.chain.from_iterable(_inlets))
elif self.inlets:
raise AttributeError("inlets is not a list, operator, string or attr annotated object")
if not isinstance(self.outlets, list):
self.outlets = [self.outlets]
# render inlets and outlets
self.inlets = [_render_object(i, context) for i in self.inlets]
self.outlets = [_render_object(i, context) for i in self.outlets]
self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)
return func(self, context, *args, **kwargs)
return cast(T, wrapper)
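
# A minimal sketch of how the decorators above are fed, written as it would appear in a
# user DAG file rather than in this module (assumes a standard Airflow installation; the
# dag id, task ids, and dataset URI are illustrative, not fixed names).
import pendulum

from airflow import DAG
from airflow.datasets import Dataset
from airflow.lineage import AUTO
from airflow.operators.bash import BashOperator

with DAG(dag_id="lineage_sketch", start_date=pendulum.datetime(2023, 1, 1), schedule=None):
    extract = BashOperator(
        task_id="extract",
        bash_command="echo extract",
        outlets=[Dataset("s3://bucket/raw/orders.csv")],  # explicitly declared outlet
    )
    transform = BashOperator(
        task_id="transform",
        bash_command="echo transform",
        inlets=AUTO,  # resolved to extract's outlets by prepare_lineage at runtime
    )
    extract >> transform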
| 5,691 | 33.920245 | 107 |
py
|
airflow
|
airflow-main/airflow/metrics/protocols.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import time
from typing import Union
from airflow.typing_compat import Protocol
DeltaType = Union[int, float, datetime.timedelta]
class TimerProtocol(Protocol):
"""Type protocol for StatsLogger.timer."""
def __enter__(self) -> Timer:
...
def __exit__(self, exc_type, exc_value, traceback) -> None:
...
def start(self) -> Timer:
"""Start the timer."""
...
def stop(self, send: bool = True) -> None:
"""Stop, and (by default) submit the timer to StatsD."""
...
class Timer(TimerProtocol):
"""
    Timer that records duration, and optionally sends it to a StatsD backend.
This class lets us have an accurate timer with the logic in one place (so
that we don't use datetime math for duration -- it is error prone).
Example usage:
.. code-block:: python
with Stats.timer() as t:
# Something to time
frob_the_foos()
log.info("Frobbing the foos took %.2f", t.duration)
Or without a context manager:
.. code-block:: python
timer = Stats.timer().start()
# Something to time
frob_the_foos()
        timer.stop()
log.info("Frobbing the foos took %.2f", timer.duration)
To send a metric:
.. code-block:: python
with Stats.timer("foos.frob"):
# Something to time
frob_the_foos()
Or both:
.. code-block:: python
with Stats.timer("foos.frob") as t:
# Something to time
frob_the_foos()
log.info("Frobbing the foos took %.2f", t.duration)
"""
# pystatsd and dogstatsd both have a timer class, but present different API
# so we can't use this as a mixin on those, instead this class contains the "real" timer
_start_time: float | None
duration: float | None
    def __init__(self, real_timer: Timer | None = None) -> None:
        self.real_timer = real_timer
        self._start_time = None
        self.duration = None
def __enter__(self) -> Timer:
return self.start()
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.stop()
def start(self) -> Timer:
"""Start the timer."""
if self.real_timer:
self.real_timer.start()
self._start_time = time.perf_counter()
return self
def stop(self, send: bool = True) -> None:
"""Stop the timer, and optionally send it to stats backend."""
if self._start_time is not None:
self.duration = time.perf_counter() - self._start_time
if send and self.real_timer:
self.real_timer.stop()
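
# A minimal sketch of using the Timer above on its own, with no StatsD client attached
# (real_timer=None): only the perf_counter-based duration measurement is exercised.
if __name__ == "__main__":
    with Timer() as t:
        sum(i * i for i in range(100_000))  # something to time
    print(f"that took {t.duration:.4f}s")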
| 3,419 | 26.580645 | 92 |
py
|
airflow
|
airflow-main/airflow/metrics/datadog_logger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import logging
from typing import TYPE_CHECKING
from airflow.configuration import conf
from airflow.metrics.protocols import DeltaType, Timer, TimerProtocol
from airflow.metrics.validators import (
AllowListValidator,
BlockListValidator,
ListValidator,
validate_stat,
)
if TYPE_CHECKING:
from datadog import DogStatsd
log = logging.getLogger(__name__)
class SafeDogStatsdLogger:
"""DogStatsd Logger."""
def __init__(
self,
dogstatsd_client: DogStatsd,
metrics_validator: ListValidator = AllowListValidator(),
metrics_tags: bool = False,
metric_tags_validator: ListValidator = AllowListValidator(),
) -> None:
self.dogstatsd = dogstatsd_client
self.metrics_validator = metrics_validator
self.metrics_tags = metrics_tags
self.metric_tags_validator = metric_tags_validator
@validate_stat
def incr(
self,
stat: str,
count: int = 1,
rate: float = 1,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Increment stat."""
if self.metrics_tags and isinstance(tags, dict):
tags_list = [
f"{key}:{value}" for key, value in tags.items() if self.metric_tags_validator.test(key)
]
else:
tags_list = []
if self.metrics_validator.test(stat):
return self.dogstatsd.increment(metric=stat, value=count, tags=tags_list, sample_rate=rate)
return None
@validate_stat
def decr(
self,
stat: str,
count: int = 1,
rate: float = 1,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Decrement stat."""
if self.metrics_tags and isinstance(tags, dict):
tags_list = [
f"{key}:{value}" for key, value in tags.items() if self.metric_tags_validator.test(key)
]
else:
tags_list = []
if self.metrics_validator.test(stat):
return self.dogstatsd.decrement(metric=stat, value=count, tags=tags_list, sample_rate=rate)
return None
@validate_stat
def gauge(
self,
stat: str,
value: int | float,
rate: float = 1,
delta: bool = False,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Gauge stat."""
if self.metrics_tags and isinstance(tags, dict):
tags_list = [
f"{key}:{value}" for key, value in tags.items() if self.metric_tags_validator.test(key)
]
else:
tags_list = []
if self.metrics_validator.test(stat):
return self.dogstatsd.gauge(metric=stat, value=value, tags=tags_list, sample_rate=rate)
return None
@validate_stat
def timing(
self,
stat: str,
dt: DeltaType,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Stats timing."""
if self.metrics_tags and isinstance(tags, dict):
tags_list = [
f"{key}:{value}" for key, value in tags.items() if self.metric_tags_validator.test(key)
]
else:
tags_list = []
if self.metrics_validator.test(stat):
if isinstance(dt, datetime.timedelta):
dt = dt.total_seconds()
return self.dogstatsd.timing(metric=stat, value=dt, tags=tags_list)
return None
@validate_stat
def timer(
self,
stat: str | None = None,
tags: dict[str, str] | None = None,
**kwargs,
) -> TimerProtocol:
"""Timer metric that can be cancelled."""
if self.metrics_tags and isinstance(tags, dict):
tags_list = [
f"{key}:{value}" for key, value in tags.items() if self.metric_tags_validator.test(key)
]
else:
tags_list = []
if stat and self.metrics_validator.test(stat):
return Timer(self.dogstatsd.timed(stat, tags=tags_list, **kwargs))
return Timer()
def get_dogstatsd_logger(cls) -> SafeDogStatsdLogger:
"""Get DataDog StatsD logger."""
from datadog import DogStatsd
metrics_validator: ListValidator
dogstatsd = DogStatsd(
host=conf.get("metrics", "statsd_host"),
port=conf.getint("metrics", "statsd_port"),
namespace=conf.get("metrics", "statsd_prefix"),
constant_tags=cls.get_constant_tags(),
)
if conf.get("metrics", "metrics_allow_list", fallback=None):
metrics_validator = AllowListValidator(conf.get("metrics", "metrics_allow_list"))
if conf.get("metrics", "metrics_block_list", fallback=None):
log.warning(
"Ignoring metrics_block_list as both metrics_allow_list "
"and metrics_block_list have been set"
)
elif conf.get("metrics", "metrics_block_list", fallback=None):
metrics_validator = BlockListValidator(conf.get("metrics", "metrics_block_list"))
else:
metrics_validator = AllowListValidator()
datadog_metrics_tags = conf.getboolean("metrics", "statsd_datadog_metrics_tags", fallback=True)
metric_tags_validator = BlockListValidator(conf.get("metrics", "statsd_disabled_tags", fallback=None))
return SafeDogStatsdLogger(dogstatsd, metrics_validator, datadog_metrics_tags, metric_tags_validator)
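
# A minimal sketch of using SafeDogStatsdLogger directly, assuming the ``datadog``
# package is installed and an agent listens on localhost:8125; the metric names and
# tag below are illustrative, and the default validators allow everything.
if __name__ == "__main__":
    from datadog import DogStatsd

    dd_logger = SafeDogStatsdLogger(
        DogStatsd(host="localhost", port=8125, namespace="airflow"),
        metrics_tags=True,
    )
    dd_logger.incr("dag_processing.processes", tags={"file_path": "/dags/example.py"})
    with dd_logger.timer("dag_processing.total_parse_time"):
        pass  # something to time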
| 6,238 | 33.469613 | 106 |
py
|
airflow
|
airflow-main/airflow/metrics/validators.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Only characters in the character set are considered valid
# for the stat_name if stat_name_default_handler is used.
from __future__ import annotations
import abc
import logging
import string
import warnings
from functools import partial, wraps
from typing import Callable, Iterable, Pattern, cast
import re2
from airflow.configuration import conf
from airflow.exceptions import InvalidStatsNameException
log = logging.getLogger(__name__)
class MetricNameLengthExemptionWarning(Warning):
"""
A Warning class to be used for the metric name length exemption notice.
Using a custom Warning class allows us to easily test that it is used.
"""
...
# Only characters in the character set are considered valid
# for the stat_name if stat_name_default_handler is used.
ALLOWED_CHARACTERS = frozenset(string.ascii_letters + string.digits + "_.-")
# The following set contains existing metrics whose names are too long for
# OpenTelemetry and should be deprecated over time. This is implemented to
# ensure that any new metrics we introduce have names which meet the OTel
# standard while also allowing us time to deprecate the old names.
# NOTE: No new names should be added to this list. This list should
# only ever shorten over time as we deprecate these names.
BACK_COMPAT_METRIC_NAME_PATTERNS: set[str] = {
r"^(?P<job_name>.*)_start$",
r"^(?P<job_name>.*)_end$",
r"^(?P<job_name>.*)_heartbeat_failure$",
r"^local_task_job.task_exit\.(?P<job_id>.*)\.(?P<dag_id>.*)\.(?P<task_id>.*)\.(?P<return_code>.*)$",
r"^operator_failures_(?P<operator_name>.*)$",
r"^operator_successes_(?P<operator_name>.*)$",
r"^ti.start.(?P<dag_id>.*)\.(?P<task_id>.*)$",
r"^ti.finish.(?P<dag_id>.*)\.(?P<task_id>.*)\.(?P<state>.*)$",
r"^task_removed_from_dag\.(?P<dag_id>.*)$",
r"^task_restored_to_dag\.(?P<dag_id>.*)$",
r"^task_instance_created_(?P<operator_name>.*)$",
r"^dag_processing\.last_run\.seconds_ago\.(?P<dag_file>.*)$",
r"^pool\.open_slots\.(?P<pool_name>.*)$",
r"^pool\.queued_slots\.(?P<pool_name>.*)$",
r"^pool\.running_slots\.(?P<pool_name>.*)$",
r"^pool\.starving_tasks\.(?P<pool_name>.*)$",
r"^dagrun\.dependency-check\.(?P<dag_id>.*)$",
r"^dag\.(?P<dag_id>.*)\.(?P<task_id>.*)\.duration$",
r"^dag_processing\.last_duration\.(?P<dag_file>.*)$",
r"^dagrun\.duration\.success\.(?P<dag_id>.*)$",
r"^dagrun\.duration\.failed\.(?P<dag_id>.*)$",
r"^dagrun\.schedule_delay\.(?P<dag_id>.*)$",
r"^dagrun\.(?P<dag_id>.*)\.first_task_scheduling_delay$",
}
BACK_COMPAT_METRIC_NAMES: set[Pattern[str]] = {re2.compile(name) for name in BACK_COMPAT_METRIC_NAME_PATTERNS}
OTEL_NAME_MAX_LENGTH = 63
def validate_stat(fn: Callable) -> Callable:
"""Check if stat name contains invalid characters; logs and does not emit stats if name is invalid."""
@wraps(fn)
def wrapper(self, stat: str | None = None, *args, **kwargs) -> Callable | None:
try:
if stat is not None:
handler_stat_name_func = get_current_handler_stat_name_func()
stat = handler_stat_name_func(stat)
return fn(self, stat, *args, **kwargs)
except InvalidStatsNameException:
log.exception("Invalid stat name: %s.", stat)
return None
return cast(Callable, wrapper)
def stat_name_otel_handler(
stat_prefix: str,
stat_name: str,
max_length: int = OTEL_NAME_MAX_LENGTH,
) -> str:
"""
Verifies that a proposed prefix and name combination will meet OpenTelemetry naming standards.
See: https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax
:param stat_prefix: The proposed prefix applied to all metric names.
:param stat_name: The proposed name.
:param max_length: The max length of the combined prefix and name; defaults to the max length
as defined in the OpenTelemetry standard, but can be overridden.
:returns: Returns the approved combined name or raises an InvalidStatsNameException.
"""
proposed_stat_name: str = f"{stat_prefix}.{stat_name}"
name_length_exemption: bool = False
matched_exemption: str = ""
# This test case is here to enforce that the values can not be None and
# must be a valid String. Without this test here, those values get cast
# to a string and pass when they should not, potentially resulting in
# metrics named "airflow.None", "airflow.42", or "None.42" for example.
if not (isinstance(stat_name, str) and isinstance(stat_prefix, str)):
raise InvalidStatsNameException("Stat name and prefix must both be strings.")
if len(proposed_stat_name) > OTEL_NAME_MAX_LENGTH:
# If the name is in the exceptions list, do not fail it for being too long.
# It may still be deemed invalid for other reasons below.
for exemption in BACK_COMPAT_METRIC_NAMES:
if re2.match(exemption, stat_name):
# There is a back-compat exception for this name; proceed
name_length_exemption = True
matched_exemption = exemption.pattern
break
else:
raise InvalidStatsNameException(
f"Invalid stat name: {proposed_stat_name}. Please see "
f"https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax"
)
# `stat_name_default_handler` throws InvalidStatsNameException if the
# provided value is not valid or returns the value if it is. We don't
# need the return value but will make use of the validation checks. If
# no exception is thrown, then the proposed name meets OTel requirements.
stat_name_default_handler(proposed_stat_name, max_length=999 if name_length_exemption else max_length)
# This warning is down here instead of up above because the exemption only
# applies to the length and a name may still be invalid for other reasons.
if name_length_exemption:
warnings.warn(
f"Stat name {stat_name} matches exemption {matched_exemption} and "
f"will be truncated to {proposed_stat_name[:OTEL_NAME_MAX_LENGTH]}. "
f"This stat name will be deprecated in the future and replaced with "
f"a shorter name combined with Attributes/Tags.",
MetricNameLengthExemptionWarning,
)
return proposed_stat_name
def stat_name_default_handler(
stat_name: str, max_length: int = 250, allowed_chars: Iterable[str] = ALLOWED_CHARACTERS
) -> str:
"""
Validate the metric stat name.
Apply changes when necessary and return the transformed stat name.
"""
if not isinstance(stat_name, str):
raise InvalidStatsNameException("The stat_name has to be a string")
if len(stat_name) > max_length:
raise InvalidStatsNameException(
f"The stat_name ({stat_name}) has to be less than {max_length} characters."
)
if not all((c in allowed_chars) for c in stat_name):
raise InvalidStatsNameException(
f"The stat name ({stat_name}) has to be composed of ASCII "
f"alphabets, numbers, or the underscore, dot, or dash characters."
)
return stat_name
def get_current_handler_stat_name_func() -> Callable[[str], str]:
"""Get Stat Name Handler from airflow.cfg."""
handler = conf.getimport("metrics", "stat_name_handler")
if handler is None:
if conf.get("metrics", "statsd_influxdb_enabled", fallback=False):
handler = partial(stat_name_default_handler, allowed_chars={*ALLOWED_CHARACTERS, ",", "="})
else:
handler = stat_name_default_handler
return handler
class ListValidator(metaclass=abc.ABCMeta):
"""
    ListValidator base class, implemented as an AllowListValidator or a BlockListValidator.
The test method must be overridden by its subclass.
"""
def __init__(self, validate_list: str | None = None) -> None:
self.validate_list: tuple[str, ...] | None = (
tuple(item.strip().lower() for item in validate_list.split(",")) if validate_list else None
)
@classmethod
def __subclasshook__(cls, subclass: Callable[[str], str]) -> bool:
        return (hasattr(subclass, "test") and callable(subclass.test)) or NotImplemented
@abc.abstractmethod
def test(self, name: str) -> bool:
"""Test if name is allowed."""
raise NotImplementedError
class AllowListValidator(ListValidator):
"""AllowListValidator only allows names that match the allowed prefixes."""
def test(self, name: str) -> bool:
if self.validate_list is not None:
return name.strip().lower().startswith(self.validate_list)
else:
return True # default is all metrics are allowed
class BlockListValidator(ListValidator):
"""BlockListValidator only allows names that do not match the blocked prefixes."""
def test(self, name: str) -> bool:
if self.validate_list is not None:
return not name.strip().lower().startswith(self.validate_list)
else:
return True # default is all metrics are allowed
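
# A minimal sketch of how the list validators above behave: prefixes are compared
# case-insensitively, and an empty configuration allows every metric name.
if __name__ == "__main__":
    allow = AllowListValidator("scheduler,executor")
    assert allow.test("scheduler.heartbeat")
    assert not allow.test("dagrun.duration.success.example_dag")

    block = BlockListValidator("scheduler")
    assert not block.test("Scheduler.heartbeat")
    assert block.test("ti.finish.example_dag.example_task.success")

    assert AllowListValidator().test("anything.at.all")  # no list configured -> allowed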
| 9,990 | 40.629167 | 110 |
py
|
airflow
|
airflow-main/airflow/metrics/otel_logger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import logging
import random
import warnings
from functools import partial
from typing import Callable, Iterable, Union
from opentelemetry import metrics
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.metrics import Instrument, Observation
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics._internal.export import ConsoleMetricExporter, PeriodicExportingMetricReader
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.util.types import Attributes
from airflow.configuration import conf
from airflow.metrics.protocols import DeltaType, Timer, TimerProtocol
from airflow.metrics.validators import (
OTEL_NAME_MAX_LENGTH,
AllowListValidator,
stat_name_otel_handler,
)
log = logging.getLogger(__name__)
GaugeValues = Union[int, float]
DEFAULT_GAUGE_VALUE = 0.0
# "airflow.dag_processing.processes" is currently the only UDC used in Airflow. If more are added,
# we should add a better system for this.
#
# Generally in OTel a Counter is monotonic (can only go up) and there is an UpDownCounter which,
# as you can guess, is non-monotonic; it can go up or down. The choice here is to either drop
# this one metric and implement the rest as monotonic Counters, implement all counters as
# UpDownCounters, or add a bit of logic to do it intelligently. The catch is that the Collector
# which transmits these metrics to the upstream dashboard tools (Prometheus, Grafana, etc.) assigns
# the type of Gauge to any UDC instead of Counter. Adding this logic feels like the best compromise
# where normal Counters still get typed correctly, and we don't lose an existing metric.
# See:
# https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#counter-creation
# https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#updowncounter
UP_DOWN_COUNTERS = {"airflow.dag_processing.processes"}
DEFAULT_METRIC_NAME_PREFIX = "airflow"
# Delimiter is placed between the universal metric prefix and the unique metric name.
DEFAULT_METRIC_NAME_DELIMITER = "."
def full_name(name: str, *, prefix: str = DEFAULT_METRIC_NAME_PREFIX) -> str:
"""Assembles the prefix, delimiter, and name and returns it as a string."""
return f"{prefix}{DEFAULT_METRIC_NAME_DELIMITER}{name}"
def _is_up_down_counter(name):
return name in UP_DOWN_COUNTERS
def _generate_key_name(name: str, attributes: Attributes = None):
if attributes:
key = name
for item in attributes.items():
key += f"_{item[0]}_{item[1]}"
else:
key = name
return key
def name_is_otel_safe(prefix: str, name: str) -> bool:
"""
Returns True if the provided name and prefix would result in a name that meets the OpenTelemetry standard.
Legal names are defined here:
https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax
"""
return bool(stat_name_otel_handler(prefix, name, max_length=OTEL_NAME_MAX_LENGTH))
def _type_as_str(obj: Instrument) -> str:
"""
Given an OpenTelemetry Instrument, returns the type of the instrument as a string.
:param obj: An OTel Instrument or subclass
:returns: The type() of the Instrument without all the nested class info
"""
# type().__name__ will return something like: '_Counter',
# this drops the leading underscore for cleaner logging.
return type(obj).__name__[1:]
def _get_otel_safe_name(name: str) -> str:
"""
Verifies that the provided name does not exceed OpenTelemetry's maximum length for metric names.
:param name: The original metric name
:returns: The name, truncated to an OTel-acceptable length if required.
"""
otel_safe_name = name[:OTEL_NAME_MAX_LENGTH]
if name != otel_safe_name:
warnings.warn(
f"Metric name `{name}` exceeds OpenTelemetry's name length limit of "
f"{OTEL_NAME_MAX_LENGTH} characters and will be truncated to `{otel_safe_name}`."
)
return otel_safe_name
def _skip_due_to_rate(rate: float) -> bool:
if rate < 0:
raise ValueError("rate must be a positive value.")
return rate < 1 and random.random() > rate
class _OtelTimer(Timer):
"""
An implementation of Stats.Timer() which records the result in the OTel Metrics Map.
    OpenTelemetry does not have a native timer, so the value is stored as a Gauge.
:param name: The name of the timer.
:param tags: Tags to append to the timer.
"""
def __init__(self, otel_logger: SafeOtelLogger, name: str | None, tags: Attributes):
super().__init__()
self.otel_logger = otel_logger
self.name = name
self.tags = tags
def stop(self, send: bool = True) -> None:
super().stop(send)
if self.name and send:
self.otel_logger.metrics_map.set_gauge_value(
full_name(prefix=self.otel_logger.prefix, name=self.name), self.duration, False, self.tags
)
class SafeOtelLogger:
"""Otel Logger."""
def __init__(
self,
otel_provider,
prefix: str = DEFAULT_METRIC_NAME_PREFIX,
allow_list_validator=AllowListValidator(),
):
self.otel: Callable = otel_provider
self.prefix: str = prefix
self.metrics_validator = allow_list_validator
self.meter = otel_provider.get_meter(__name__)
self.metrics_map = MetricsMap(self.meter)
def incr(
self,
stat: str,
count: int = 1,
rate: float = 1,
tags: Attributes = None,
):
"""
Increment stat by count.
:param stat: The name of the stat to increment.
:param count: A positive integer to add to the current value of stat.
:param rate: value between 0 and 1 that represents the sample rate at
which the metric is going to be emitted.
:param tags: Tags to append to the stat.
"""
if _skip_due_to_rate(rate):
return
if count < 0:
raise ValueError("count must be a positive value.")
if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):
counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat), attributes=tags)
counter.add(count, attributes=tags)
return counter
def decr(
self,
stat: str,
count: int = 1,
rate: float = 1,
tags: Attributes = None,
):
"""
Decrement stat by count.
:param stat: The name of the stat to decrement.
:param count: A positive integer to subtract from current value of stat.
:param rate: value between 0 and 1 that represents the sample rate at
which the metric is going to be emitted.
:param tags: Tags to append to the stat.
"""
if _skip_due_to_rate(rate):
return
if count < 0:
raise ValueError("count must be a positive value.")
if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):
counter = self.metrics_map.get_counter(full_name(prefix=self.prefix, name=stat))
counter.add(-count, attributes=tags)
return counter
def gauge(
self,
stat: str,
value: int | float,
rate: float = 1,
delta: bool = False,
*,
tags: Attributes = None,
back_compat_name: str = "",
) -> None:
"""
Record a new value for a Gauge.
:param stat: The name of the stat to update.
:param value: The new value of stat, either a float or an int.
:param rate: value between 0 and 1 that represents the sample rate at
which the metric is going to be emitted.
:param delta: If true, the provided value will be added to the previous value.
If False the new value will override the previous.
:param tags: Tags to append to the stat.
:param back_compat_name: If an alternative name is provided, the
stat will be emitted using both names if possible.
"""
if _skip_due_to_rate(rate):
return
if back_compat_name and self.metrics_validator.test(back_compat_name):
self.metrics_map.set_gauge_value(
full_name(prefix=self.prefix, name=back_compat_name), value, delta, tags
)
if self.metrics_validator.test(stat):
self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), value, delta, tags)
def timing(
self,
stat: str,
dt: DeltaType,
*,
tags: Attributes = None,
) -> None:
"""OTel does not have a native timer, stored as a Gauge whose value is number of seconds elapsed."""
if self.metrics_validator.test(stat) and name_is_otel_safe(self.prefix, stat):
if isinstance(dt, datetime.timedelta):
dt = dt.total_seconds()
self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), float(dt), False, tags)
def timer(
self,
stat: str | None = None,
*args,
tags: Attributes = None,
**kwargs,
) -> TimerProtocol:
"""Timer context manager returns the duration and can be cancelled."""
return _OtelTimer(self, stat, tags)
class MetricsMap:
"""Stores Otel Instruments."""
def __init__(self, meter):
self.meter = meter
self.map = {}
def clear(self) -> None:
self.map.clear()
def _create_counter(self, name):
"""Creates a new counter or up_down_counter for the provided name."""
otel_safe_name = _get_otel_safe_name(name)
if _is_up_down_counter(name):
counter = self.meter.create_up_down_counter(name=otel_safe_name)
else:
counter = self.meter.create_counter(name=otel_safe_name)
logging.debug("Created %s as type: %s", otel_safe_name, _type_as_str(counter))
return counter
def get_counter(self, name: str, attributes: Attributes = None):
"""
Returns the counter; creates a new one if it did not exist.
:param name: The name of the counter to fetch or create.
:param attributes: Counter attributes, used to generate a unique key to store the counter.
"""
key = _generate_key_name(name, attributes)
        if key in self.map:
return self.map[key]
else:
new_counter = self._create_counter(name)
self.map[key] = new_counter
return new_counter
def del_counter(self, name: str, attributes: Attributes = None) -> None:
"""
Deletes a counter.
:param name: The name of the counter to delete.
:param attributes: Counter attributes which were used to generate a unique key to store the counter.
"""
key = _generate_key_name(name, attributes)
        if key in self.map:
del self.map[key]
def set_gauge_value(self, name: str, value: float | None, delta: bool, tags: Attributes):
"""
Overrides the last reading for a Gauge with a new value.
:param name: The name of the gauge to record.
:param value: The new reading to record.
:param delta: If True, value is added to the previous reading, else it overrides.
:param tags: Gauge attributes which were used to generate a unique key to store the counter.
:returns: None
"""
key: str = _generate_key_name(name, tags)
new_value = value or DEFAULT_GAUGE_VALUE
old_value = self.poke_gauge(name, tags)
        # If delta is true, add the new value to the last reading, otherwise overwrite it.
        if delta:
            new_value += old_value
        self.map[key] = Observation(new_value, tags)
def _create_gauge(self, name: str, attributes: Attributes = None):
"""
Creates a new Observable Gauge with the provided name and the default value.
:param name: The name of the gauge to fetch or create.
:param attributes: Gauge attributes, used to generate a unique key to store the gauge.
"""
otel_safe_name = _get_otel_safe_name(name)
key = _generate_key_name(name, attributes)
gauge = self.meter.create_observable_gauge(
name=otel_safe_name,
callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],
)
self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)
return gauge
def read_gauge(self, key: str, *args) -> Iterable[Observation]:
"""Callback for the Observable Gauges, returns the Observation for the provided key."""
yield self.map[key]
def poke_gauge(self, name: str, attributes: Attributes = None) -> GaugeValues:
"""
Returns the value of the gauge; creates a new one with the default value if it did not exist.
:param name: The name of the gauge to fetch or create.
:param attributes: Gauge attributes, used to generate a unique key to store the gauge.
:returns: The integer or float value last recorded for the provided Gauge name.
"""
key = _generate_key_name(name, attributes)
if key not in self.map:
self._create_gauge(name, attributes)
return self.map[key].value
def get_otel_logger(cls) -> SafeOtelLogger:
host = conf.get("metrics", "otel_host") # ex: "breeze-otel-collector"
port = conf.getint("metrics", "otel_port") # ex: 4318
prefix = conf.get("metrics", "otel_prefix") # ex: "airflow"
# PeriodicExportingMetricReader will default to an interval of 60000 millis.
interval = conf.getint("metrics", "otel_interval_milliseconds", fallback=None) # ex: 30000
debug = conf.getboolean("metrics", "otel_debugging_on")
allow_list = conf.get("metrics", "metrics_allow_list", fallback=None)
allow_list_validator = AllowListValidator(allow_list)
resource = Resource(attributes={SERVICE_NAME: "Airflow"})
# TODO: figure out https instead of http ??
endpoint = f"http://{host}:{port}/v1/metrics"
logging.info("[Metric Exporter] Connecting to OpenTelemetry Collector at %s", endpoint)
readers = [
PeriodicExportingMetricReader(
OTLPMetricExporter(
endpoint=endpoint,
headers={"Content-Type": "application/json"},
),
export_interval_millis=interval,
)
]
if debug:
export_to_console = PeriodicExportingMetricReader(ConsoleMetricExporter())
readers.append(export_to_console)
metrics.set_meter_provider(
MeterProvider(
resource=resource,
metric_readers=readers,
shutdown_on_exit=False,
),
)
return SafeOtelLogger(metrics.get_meter_provider(), prefix, allow_list_validator)
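
# A minimal sketch of the pure naming helpers above, independent of any exporter:
# full_name() applies the metric prefix, _get_otel_safe_name() truncates (with a
# warning) anything beyond the 63-character OTel limit, and _generate_key_name()
# folds attributes into the key used by MetricsMap.
if __name__ == "__main__":
    assert full_name("ti.start") == "airflow.ti.start"
    assert len(_get_otel_safe_name("x" * 100)) == OTEL_NAME_MAX_LENGTH
    assert (
        _generate_key_name("pool.open_slots", {"pool_name": "default_pool"})
        == "pool.open_slots_pool_name_default_pool"
    )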
| 15,907 | 36.518868 | 119 |
py
|
airflow
|
airflow-main/airflow/metrics/statsd_logger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from functools import wraps
from typing import TYPE_CHECKING, Callable, TypeVar, cast
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.metrics.protocols import DeltaType, Timer, TimerProtocol
from airflow.metrics.validators import (
AllowListValidator,
BlockListValidator,
ListValidator,
validate_stat,
)
if TYPE_CHECKING:
from statsd import StatsClient
T = TypeVar("T", bound=Callable)
log = logging.getLogger(__name__)
def prepare_stat_with_tags(fn: T) -> T:
"""Add tags to stat with influxdb standard format if influxdb_tags_enabled is True."""
@wraps(fn)
def wrapper(
self, stat: str | None = None, *args, tags: dict[str, str] | None = None, **kwargs
) -> Callable[[str], str]:
if self.influxdb_tags_enabled:
if stat is not None and tags is not None:
for k, v in tags.items():
if self.metric_tags_validator.test(k):
if all(c not in [",", "="] for c in v + k):
stat += f",{k}={v}"
else:
log.error("Dropping invalid tag: %s=%s.", k, v)
return fn(self, stat, *args, tags=tags, **kwargs)
return cast(T, wrapper)
class SafeStatsdLogger:
"""StatsD Logger."""
def __init__(
self,
statsd_client: StatsClient,
metrics_validator: ListValidator = AllowListValidator(),
influxdb_tags_enabled: bool = False,
metric_tags_validator: ListValidator = AllowListValidator(),
) -> None:
self.statsd = statsd_client
self.metrics_validator = metrics_validator
self.influxdb_tags_enabled = influxdb_tags_enabled
self.metric_tags_validator = metric_tags_validator
@prepare_stat_with_tags
@validate_stat
def incr(
self,
stat: str,
count: int = 1,
rate: float = 1,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Increment stat."""
if self.metrics_validator.test(stat):
return self.statsd.incr(stat, count, rate)
return None
@prepare_stat_with_tags
@validate_stat
def decr(
self,
stat: str,
count: int = 1,
rate: float = 1,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Decrement stat."""
if self.metrics_validator.test(stat):
return self.statsd.decr(stat, count, rate)
return None
@prepare_stat_with_tags
@validate_stat
def gauge(
self,
stat: str,
value: int | float,
rate: float = 1,
delta: bool = False,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Gauge stat."""
if self.metrics_validator.test(stat):
return self.statsd.gauge(stat, value, rate, delta)
return None
@prepare_stat_with_tags
@validate_stat
def timing(
self,
stat: str,
dt: DeltaType,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Stats timing."""
if self.metrics_validator.test(stat):
return self.statsd.timing(stat, dt)
return None
@prepare_stat_with_tags
@validate_stat
def timer(
self,
stat: str | None = None,
*args,
tags: dict[str, str] | None = None,
**kwargs,
) -> TimerProtocol:
"""Timer metric that can be cancelled."""
if stat and self.metrics_validator.test(stat):
return Timer(self.statsd.timer(stat, *args, **kwargs))
return Timer()
def get_statsd_logger(cls) -> SafeStatsdLogger:
"""Returns logger for StatsD."""
    # No need to check for scheduler/statsd_on here -> this method is only called when it is set,
    # and previously it would crash (None is not callable) if it was called without it.
from statsd import StatsClient
stats_class = conf.getimport("metrics", "statsd_custom_client_path", fallback=None)
metrics_validator: ListValidator
if stats_class:
if not issubclass(stats_class, StatsClient):
raise AirflowConfigException(
"Your custom StatsD client must extend the statsd.StatsClient in order to ensure "
"backwards compatibility."
)
else:
log.info("Successfully loaded custom StatsD client")
else:
stats_class = StatsClient
statsd = stats_class(
host=conf.get("metrics", "statsd_host"),
port=conf.getint("metrics", "statsd_port"),
prefix=conf.get("metrics", "statsd_prefix"),
)
if conf.get("metrics", "metrics_allow_list", fallback=None):
metrics_validator = AllowListValidator(conf.get("metrics", "metrics_allow_list"))
if conf.get("metrics", "metrics_block_list", fallback=None):
log.warning(
"Ignoring metrics_block_list as both metrics_allow_list "
"and metrics_block_list have been set"
)
elif conf.get("metrics", "metrics_block_list", fallback=None):
metrics_validator = BlockListValidator(conf.get("metrics", "metrics_block_list"))
else:
metrics_validator = AllowListValidator()
influxdb_tags_enabled = conf.getboolean("metrics", "statsd_influxdb_enabled", fallback=False)
metric_tags_validator = BlockListValidator(conf.get("metrics", "statsd_disabled_tags", fallback=None))
return SafeStatsdLogger(statsd, metrics_validator, influxdb_tags_enabled, metric_tags_validator)
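
# A minimal sketch of using SafeStatsdLogger directly, assuming the ``statsd`` package
# is installed and a StatsD daemon listens on localhost:8125. Tags are only folded into
# the stat name when influxdb_tags_enabled is set, which in a real deployment also
# requires statsd_influxdb_enabled in the [metrics] config so the name validator
# accepts "," and "=".
if __name__ == "__main__":
    from statsd import StatsClient

    statsd_logger = SafeStatsdLogger(StatsClient(host="localhost", port=8125, prefix="airflow"))
    statsd_logger.incr("ti_successes")
    with statsd_logger.timer("dag_processing.total_parse_time"):
        pass  # something to time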
| 6,448 | 32.588542 | 106 |
py
|
airflow
|
airflow-main/airflow/metrics/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/metrics/base_stats_logger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.metrics.protocols import DeltaType, Timer, TimerProtocol
from airflow.typing_compat import Protocol
class StatsLogger(Protocol):
"""This class is only used for TypeChecking (for IDEs, mypy, etc)."""
@classmethod
def incr(
cls,
stat: str,
count: int = 1,
rate: int | float = 1,
*,
tags: dict[str, Any] | None = None,
) -> None:
"""Increment stat."""
@classmethod
def decr(
cls,
stat: str,
count: int = 1,
rate: int | float = 1,
*,
tags: dict[str, Any] | None = None,
) -> None:
"""Decrement stat."""
@classmethod
def gauge(
cls,
stat: str,
value: float,
rate: int | float = 1,
delta: bool = False,
*,
tags: dict[str, Any] | None = None,
) -> None:
"""Gauge stat."""
@classmethod
def timing(
cls,
stat: str,
dt: DeltaType | None,
*,
tags: dict[str, Any] | None = None,
) -> None:
"""Stats timing."""
@classmethod
def timer(cls, *args, **kwargs) -> TimerProtocol:
"""Timer metric that can be cancelled."""
raise NotImplementedError()
class NoStatsLogger:
"""If no StatsLogger is configured, NoStatsLogger is used as a fallback."""
@classmethod
def incr(cls, stat: str, count: int = 1, rate: int = 1, *, tags: dict[str, str] | None = None) -> None:
"""Increment stat."""
@classmethod
def decr(cls, stat: str, count: int = 1, rate: int = 1, *, tags: dict[str, str] | None = None) -> None:
"""Decrement stat."""
@classmethod
def gauge(
cls,
stat: str,
value: int,
rate: int = 1,
delta: bool = False,
*,
tags: dict[str, str] | None = None,
) -> None:
"""Gauge stat."""
@classmethod
def timing(cls, stat: str, dt: DeltaType, *, tags: dict[str, str] | None = None) -> None:
"""Stats timing."""
@classmethod
def timer(cls, *args, **kwargs) -> TimerProtocol:
"""Timer metric that can be cancelled."""
return Timer()
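
# A minimal sketch of the Protocol above: any object exposing the same methods can be
# passed where a StatsLogger is expected, and NoStatsLogger silently drops everything,
# so calling code never needs to guard on whether metrics are configured.
def _record_heartbeat(stats: StatsLogger) -> None:
    stats.incr("scheduler.heartbeat")
    stats.timing("scheduler.scheduler_loop_duration", 0.5)


if __name__ == "__main__":
    _record_heartbeat(NoStatsLogger)  # accepted; every call is a no-op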
| 3,030 | 26.554545 | 107 |
py
|
airflow
|
airflow-main/airflow/template/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/template/templater.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Collection, Iterable, Sequence
from airflow.utils.context import Context
from airflow.utils.helpers import render_template_as_native, render_template_to_string
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import ResolveMixin
from airflow.utils.session import NEW_SESSION, provide_session
if TYPE_CHECKING:
import jinja2
from sqlalchemy.orm import Session
from airflow import DAG
class Templater(LoggingMixin):
"""
    This renders the template fields of an object.
:meta private:
"""
# For derived classes to define which fields will get jinjaified.
template_fields: Collection[str]
# Defines which files extensions to look for in the templated fields.
template_ext: Sequence[str]
def get_template_env(self, dag: DAG | None = None) -> jinja2.Environment:
"""Fetch a Jinja template environment from the DAG or instantiate empty environment if no DAG."""
# This is imported locally since Jinja2 is heavy and we don't need it
# for most of the functionalities. It is imported by get_template_env()
# though, so we don't need to put this after the 'if dag' check.
from airflow.templates import SandboxedEnvironment
if dag:
return dag.get_template_env(force_sandboxed=False)
return SandboxedEnvironment(cache_size=0)
def prepare_template(self) -> None:
"""Hook triggered after the templated fields get replaced by their content.
If you need your object to alter the content of the file before the
template is rendered, it should override this method to do so.
"""
def resolve_template_files(self) -> None:
"""Getting the content of files for template_field / template_ext."""
if self.template_ext:
for field in self.template_fields:
content = getattr(self, field, None)
if content is None:
continue
elif isinstance(content, str) and any(content.endswith(ext) for ext in self.template_ext):
env = self.get_template_env()
try:
setattr(self, field, env.loader.get_source(env, content)[0]) # type: ignore
except Exception:
self.log.exception("Failed to resolve template field %r", field)
elif isinstance(content, list):
env = self.get_template_env()
for i, item in enumerate(content):
if isinstance(item, str) and any(item.endswith(ext) for ext in self.template_ext):
try:
content[i] = env.loader.get_source(env, item)[0] # type: ignore
except Exception:
self.log.exception("Failed to get source %s", item)
self.prepare_template()
@provide_session
def _do_render_template_fields(
self,
parent: Any,
template_fields: Iterable[str],
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set[int],
*,
session: Session = NEW_SESSION,
) -> None:
for attr_name in template_fields:
value = getattr(parent, attr_name)
rendered_content = self.render_template(
value,
context,
jinja_env,
seen_oids,
)
if rendered_content:
setattr(parent, attr_name, rendered_content)
def _render(self, template, context, dag: DAG | None = None) -> Any:
if dag and dag.render_template_as_native_obj:
return render_template_as_native(template, context)
return render_template_to_string(template, context)
def render_template(
self,
content: Any,
context: Context,
jinja_env: jinja2.Environment | None = None,
seen_oids: set[int] | None = None,
) -> Any:
"""Render a templated string.
If *content* is a collection holding multiple templated strings, strings
in the collection will be templated recursively.
:param content: Content to template. Only strings can be templated (may
be inside a collection).
:param context: Dict with values to apply on templated content
:param jinja_env: Jinja environment. Can be provided to avoid
re-creating Jinja environments during recursion.
:param seen_oids: template fields already rendered (to avoid
*RecursionError* on circular dependencies)
:return: Templated content
"""
# "content" is a bad name, but we're stuck to it being public API.
value = content
del content
if seen_oids is not None:
oids = seen_oids
else:
oids = set()
if id(value) in oids:
return value
if not jinja_env:
jinja_env = self.get_template_env()
if isinstance(value, str):
if any(value.endswith(ext) for ext in self.template_ext): # A filepath.
template = jinja_env.get_template(value)
else:
template = jinja_env.from_string(value)
return self._render(template, context)
if isinstance(value, ResolveMixin):
return value.resolve(context)
# Fast path for common built-in collections.
if value.__class__ is tuple:
return tuple(self.render_template(element, context, jinja_env, oids) for element in value)
elif isinstance(value, tuple): # Special case for named tuples.
return value.__class__(*(self.render_template(el, context, jinja_env, oids) for el in value))
elif isinstance(value, list):
return [self.render_template(element, context, jinja_env, oids) for element in value]
elif isinstance(value, dict):
return {k: self.render_template(v, context, jinja_env, oids) for k, v in value.items()}
elif isinstance(value, set):
return {self.render_template(element, context, jinja_env, oids) for element in value}
# More complex collections.
self._render_nested_template_fields(value, context, jinja_env, oids)
return value
def _render_nested_template_fields(
self,
value: Any,
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set[int],
) -> None:
if id(value) in seen_oids:
return
seen_oids.add(id(value))
try:
nested_template_fields = value.template_fields
except AttributeError:
# content has no inner template fields
return
self._do_render_template_fields(value, nested_template_fields, context, jinja_env, seen_oids)
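
# A minimal sketch of the class above, assuming an importable Airflow installation: a
# tiny Templater subclass with a single template field, rendered from a plain mapping
# used as the context. The class and field names are illustrative.
class _GreetingTemplater(Templater):
    template_fields = ("greeting",)
    template_ext = ()  # no file extensions -> strings are rendered inline

    def __init__(self, greeting: str) -> None:
        super().__init__()
        self.greeting = greeting


if __name__ == "__main__":
    greeter = _GreetingTemplater("Hello {{ name }}!")
    print(greeter.render_template(greeter.greeting, {"name": "world"}))  # Hello world!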
| 7,764 | 39.442708 | 106 |
py
|
airflow
|
airflow-main/airflow/datasets/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, ClassVar
from urllib.parse import urlsplit
import attr
@attr.define()
class Dataset:
"""A Dataset is used for marking data dependencies between workflows."""
uri: str = attr.field(validator=[attr.validators.min_len(1), attr.validators.max_len(3000)])
extra: dict[str, Any] | None = None
__version__: ClassVar[int] = 1
@uri.validator
def _check_uri(self, attr, uri: str):
if uri.isspace():
raise ValueError(f"{attr.name} cannot be just whitespace")
try:
uri.encode("ascii")
except UnicodeEncodeError:
raise ValueError(f"{attr.name!r} must be ascii")
parsed = urlsplit(uri)
if parsed.scheme and parsed.scheme.lower() == "airflow":
raise ValueError(f"{attr.name!r} scheme `airflow` is reserved")
| 1,656 | 35.822222 | 96 |
py
|
airflow
|
airflow-main/airflow/datasets/manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from sqlalchemy import exc
from sqlalchemy.orm.session import Session
from airflow.configuration import conf
from airflow.datasets import Dataset
from airflow.models.dataset import DatasetDagRunQueue, DatasetEvent, DatasetModel
from airflow.stats import Stats
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstance
class DatasetManager(LoggingMixin):
"""
A pluggable class that manages operations for datasets.
The intent is to have one place to handle all Dataset-related operations, so different
Airflow deployments can use plugins that broadcast dataset events to each other.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def register_dataset_change(
self, *, task_instance: TaskInstance, dataset: Dataset, extra=None, session: Session, **kwargs
) -> None:
"""
        Register dataset-related changes.
        For local datasets, look them up, record the dataset event, queue dagruns, and broadcast
        the dataset event.
"""
dataset_model = session.query(DatasetModel).filter(DatasetModel.uri == dataset.uri).one_or_none()
if not dataset_model:
self.log.warning("DatasetModel %s not found", dataset)
return
session.add(
DatasetEvent(
dataset_id=dataset_model.id,
source_task_id=task_instance.task_id,
source_dag_id=task_instance.dag_id,
source_run_id=task_instance.run_id,
source_map_index=task_instance.map_index,
extra=extra,
)
)
session.flush()
Stats.incr("dataset.updates")
if dataset_model.consuming_dags:
self._queue_dagruns(dataset_model, session)
session.flush()
def _queue_dagruns(self, dataset: DatasetModel, session: Session) -> None:
# Possible race condition: if multiple dags or multiple (usually
# mapped) tasks update the same dataset, this can fail with a unique
# constraint violation.
#
# If we support it, use ON CONFLICT to do nothing, otherwise
# "fallback" to running this in a nested transaction. This is needed
# so that the adding of these rows happens in the same transaction
# where `ti.state` is changed.
if session.bind.dialect.name == "postgresql":
return self._postgres_queue_dagruns(dataset, session)
return self._slow_path_queue_dagruns(dataset, session)
def _slow_path_queue_dagruns(self, dataset: DatasetModel, session: Session) -> None:
consuming_dag_ids = [x.dag_id for x in dataset.consuming_dags]
self.log.debug("consuming dag ids %s", consuming_dag_ids)
# Don't error whole transaction when a single RunQueue item conflicts.
# https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#using-savepoint
for dag_id in consuming_dag_ids:
item = DatasetDagRunQueue(target_dag_id=dag_id, dataset_id=dataset.id)
try:
with session.begin_nested():
session.merge(item)
except exc.IntegrityError:
self.log.debug("Skipping record %s", item, exc_info=True)
def _postgres_queue_dagruns(self, dataset: DatasetModel, session: Session) -> None:
from sqlalchemy.dialects.postgresql import insert
stmt = insert(DatasetDagRunQueue).values(dataset_id=dataset.id).on_conflict_do_nothing()
session.execute(
stmt,
[{"target_dag_id": target_dag.dag_id} for target_dag in dataset.consuming_dags],
)
def resolve_dataset_manager() -> DatasetManager:
"""Retrieve the dataset manager."""
_dataset_manager_class = conf.getimport(
section="core",
key="dataset_manager_class",
fallback="airflow.datasets.manager.DatasetManager",
)
_dataset_manager_kwargs = conf.getjson(
section="core",
key="dataset_manager_kwargs",
fallback={},
)
return _dataset_manager_class(**_dataset_manager_kwargs)
dataset_manager = resolve_dataset_manager()
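
# A minimal sketch of the plugin point above: a custom manager that logs every dataset
# change before delegating to the default behaviour. The module path in the config
# snippet is hypothetical.
#
#   [core]
#   dataset_manager_class = my_company.lineage.AuditingDatasetManager
class AuditingDatasetManager(DatasetManager):
    def register_dataset_change(
        self, *, task_instance: TaskInstance, dataset: Dataset, extra=None, session: Session, **kwargs
    ) -> None:
        self.log.info("Dataset %s updated by task %s", dataset.uri, task_instance.task_id)
        super().register_dataset_change(
            task_instance=task_instance, dataset=dataset, extra=extra, session=session, **kwargs
        )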
| 5,065 | 38.271318 | 105 |
py
|
airflow
|
airflow-main/airflow/api_connexion/exceptions.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from typing import Any
import flask
import werkzeug
from connexion import FlaskApi, ProblemException, problem
from airflow.utils.docs import get_docs_url
doc_link = get_docs_url("stable-rest-api-ref.html")
EXCEPTIONS_LINK_MAP = {
400: f"{doc_link}#section/Errors/BadRequest",
404: f"{doc_link}#section/Errors/NotFound",
405: f"{doc_link}#section/Errors/MethodNotAllowed",
401: f"{doc_link}#section/Errors/Unauthenticated",
409: f"{doc_link}#section/Errors/AlreadyExists",
403: f"{doc_link}#section/Errors/PermissionDenied",
500: f"{doc_link}#section/Errors/Unknown",
}
def common_error_handler(exception: BaseException) -> flask.Response:
"""Used to capture connexion exceptions and add link to the type field."""
if isinstance(exception, ProblemException):
link = EXCEPTIONS_LINK_MAP.get(exception.status)
if link:
response = problem(
status=exception.status,
title=exception.title,
detail=exception.detail,
type=link,
instance=exception.instance,
headers=exception.headers,
ext=exception.ext,
)
else:
response = problem(
status=exception.status,
title=exception.title,
detail=exception.detail,
type=exception.type,
instance=exception.instance,
headers=exception.headers,
ext=exception.ext,
)
else:
if not isinstance(exception, werkzeug.exceptions.HTTPException):
exception = werkzeug.exceptions.InternalServerError()
response = problem(title=exception.name, detail=exception.description, status=exception.code)
return FlaskApi.get_response(response)
class NotFound(ProblemException):
"""Raise when the object cannot be found."""
def __init__(
self,
title: str = "Not Found",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.NOT_FOUND,
type=EXCEPTIONS_LINK_MAP[404],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
class BadRequest(ProblemException):
"""Raise when the server processes a bad request."""
def __init__(
self,
title: str = "Bad Request",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.BAD_REQUEST,
type=EXCEPTIONS_LINK_MAP[400],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
class Unauthenticated(ProblemException):
"""Raise when the user is not authenticated."""
def __init__(
self,
title: str = "Unauthorized",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
):
super().__init__(
status=HTTPStatus.UNAUTHORIZED,
type=EXCEPTIONS_LINK_MAP[401],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
class PermissionDenied(ProblemException):
"""Raise when the user does not have the required permissions."""
def __init__(
self,
title: str = "Forbidden",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.FORBIDDEN,
type=EXCEPTIONS_LINK_MAP[403],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
class AlreadyExists(ProblemException):
"""Raise when the object already exists."""
def __init__(
self,
title="Conflict",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
):
super().__init__(
status=HTTPStatus.CONFLICT,
type=EXCEPTIONS_LINK_MAP[409],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
class Unknown(ProblemException):
"""Returns a response body and status code for HTTP 500 exception."""
def __init__(
self,
title: str = "Internal Server Error",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(
status=HTTPStatus.INTERNAL_SERVER_ERROR,
type=EXCEPTIONS_LINK_MAP[500],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
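# Hedged usage sketch (editorial addition, not part of the upstream module): these classes
# are intended to be raised from API view functions; common_error_handler() above renders
# them as RFC 7807 "problem" responses with the matching EXCEPTIONS_LINK_MAP URL in the
# "type" field. The view name and lookup below are illustrative assumptions.
def _example_get_widget(widget_id: str) -> dict:
    """Illustrative view body: a failed lookup surfaces as a 404 problem response."""
    widget = None  # stand-in for a real database lookup
    if widget is None:
        raise NotFound(title="Widget not found", detail=f"No widget with id {widget_id!r}")
    return widget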
| 5,674 | 28.404145 | 101 |
py
|
airflow
|
airflow-main/airflow/api_connexion/types.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
from flask import Response
APIResponse = Union[
Response,
Tuple[object, int], # For '(NoContent, 201)'.
Mapping[str, Any], # JSON.
]
UpdateMask = Optional[Sequence[str]]
| 1,074 | 34.833333 | 65 |
py
|
airflow
|
airflow-main/airflow/api_connexion/security.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import wraps
from typing import Callable, Sequence, TypeVar, cast
from flask import Response
from airflow.api_connexion.exceptions import PermissionDenied, Unauthenticated
from airflow.utils.airflow_flask_app import get_airflow_app
T = TypeVar("T", bound=Callable)
def check_authentication() -> None:
"""Checks that the request has valid authorization information."""
for auth in get_airflow_app().api_auth:
response = auth.requires_authentication(Response)()
if response.status_code == 200:
return
# since this handler only checks authentication, not authorization,
# we should always return 401
raise Unauthenticated(headers=response.headers)
def requires_access(permissions: Sequence[tuple[str, str]] | None = None) -> Callable[[T], T]:
"""Factory for decorator that checks current user's permissions against required permissions."""
appbuilder = get_airflow_app().appbuilder
if appbuilder.update_perms:
appbuilder.sm.sync_resource_permissions(permissions)
def requires_access_decorator(func: T):
@wraps(func)
def decorated(*args, **kwargs):
check_authentication()
if appbuilder.sm.check_authorization(permissions, kwargs.get("dag_id")):
return func(*args, **kwargs)
raise PermissionDenied()
return cast(T, decorated)
return requires_access_decorator
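# Hedged usage sketch (editorial addition, not part of the upstream module): a view guarded
# by requires_access(). Building the decorator needs a running Airflow Flask app (it reaches
# into the appbuilder), so it is shown commented out; the permission pair follows the usual
# (action, resource) convention and the endpoint body is an illustrative assumption.
#
#   from airflow.security import permissions
#
#   @requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
#   def get_dags_example(*, limit: int = 100, **kwargs):
#       ...  # runs only after check_authentication() and the permission check both pass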
| 2,252 | 37.844828 | 100 |
py
|
airflow
|
airflow-main/airflow/api_connexion/parameters.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from datetime import datetime
from functools import wraps
from typing import Any, Callable, Container, TypeVar, cast
from pendulum.parsing import ParserError
from sqlalchemy import text
from sqlalchemy.sql import Select
from airflow.api_connexion.exceptions import BadRequest
from airflow.configuration import conf
from airflow.utils import timezone
log = logging.getLogger(__name__)
def validate_istimezone(value: datetime) -> None:
"""Validates that a datetime is not naive."""
if not value.tzinfo:
raise BadRequest("Invalid datetime format", detail="Naive datetime is disallowed")
def format_datetime(value: str) -> datetime:
"""
Format datetime objects.
Datetime format parser for args since connexion doesn't parse datetimes
https://github.com/zalando/connexion/issues/476
    This should only be used within connexion views because it raises 400.
"""
value = value.strip()
if value[-1] != "Z":
value = value.replace(" ", "+")
try:
return timezone.parse(value)
except (ParserError, TypeError) as err:
raise BadRequest("Incorrect datetime argument", detail=str(err))
def check_limit(value: int) -> int:
"""
    Check that the requested limit does not exceed the configured maximum page limit.
    Values above the maximum are capped to it (with a warning), a value of zero falls
    back to the configured default page limit, and negative values raise BadRequest.
"""
max_val = conf.getint("api", "maximum_page_limit") # user configured max page limit
fallback = conf.getint("api", "fallback_page_limit")
if value > max_val:
log.warning(
"The limit param value %s passed in API exceeds the configured maximum page limit %s",
value,
max_val,
)
return max_val
if value == 0:
return fallback
if value < 0:
raise BadRequest("Page limit must be a positive integer")
return value
T = TypeVar("T", bound=Callable)
def format_parameters(params_formatters: dict[str, Callable[[Any], Any]]) -> Callable[[T], T]:
"""
    Decorator factory that creates a decorator that converts parameters using the given formatters.
Using it allows you to separate parameter formatting from endpoint logic.
:param params_formatters: Map of key name and formatter function
"""
def format_parameters_decorator(func: T) -> T:
@wraps(func)
def wrapped_function(*args, **kwargs):
for key, formatter in params_formatters.items():
if key in kwargs:
kwargs[key] = formatter(kwargs[key])
return func(*args, **kwargs)
return cast(T, wrapped_function)
return format_parameters_decorator
def apply_sorting(
query: Select,
order_by: str,
to_replace: dict[str, str] | None = None,
allowed_attrs: Container[str] | None = None,
) -> Select:
"""Apply sorting to query."""
lstriped_orderby = order_by.lstrip("-")
if allowed_attrs and lstriped_orderby not in allowed_attrs:
raise BadRequest(
detail=f"Ordering with '{lstriped_orderby}' is disallowed or "
f"the attribute does not exist on the model"
)
if to_replace:
lstriped_orderby = to_replace.get(lstriped_orderby, lstriped_orderby)
if order_by[0] == "-":
order_by = f"{lstriped_orderby} desc"
else:
order_by = f"{lstriped_orderby} asc"
return query.order_by(text(order_by))
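# Hedged usage sketch (editorial addition, not part of the upstream module): combining the
# helpers above on a view function. The function name, parameters and body are illustrative
# assumptions; only the decorator wiring mirrors how the API endpoints use these utilities.
@format_parameters({"limit": check_limit, "execution_date_gte": format_datetime})
def _example_list_items(*, limit: int = 100, execution_date_gte: datetime | None = None, **kwargs):
    """Illustrative view body: ``limit`` is capped and the date string is parsed before this runs."""
    return {"limit": limit, "execution_date_gte": execution_date_gte}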
| 4,242 | 32.148438 | 98 |
py
|
airflow
|
airflow-main/airflow/api_connexion/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/common_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import inspect
import json
import typing
import marshmallow
from dateutil import relativedelta
from marshmallow import Schema, fields, validate
from marshmallow_oneofschema import OneOfSchema
from airflow.models.mappedoperator import MappedOperator
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.utils.weight_rule import WeightRule
class CronExpression(typing.NamedTuple):
"""Cron expression schema."""
value: str
class TimeDeltaSchema(Schema):
"""Time delta schema."""
objectType = fields.Constant("TimeDelta", data_key="__type")
days = fields.Integer()
seconds = fields.Integer()
microseconds = fields.Integer()
@marshmallow.post_load
def make_time_delta(self, data, **kwargs):
"""Create time delta based on data."""
if "objectType" in data:
del data["objectType"]
return datetime.timedelta(**data)
class RelativeDeltaSchema(Schema):
"""Relative delta schema."""
objectType = fields.Constant("RelativeDelta", data_key="__type")
years = fields.Integer()
months = fields.Integer()
days = fields.Integer()
leapdays = fields.Integer()
hours = fields.Integer()
minutes = fields.Integer()
seconds = fields.Integer()
microseconds = fields.Integer()
year = fields.Integer()
month = fields.Integer()
day = fields.Integer()
hour = fields.Integer()
minute = fields.Integer()
second = fields.Integer()
microsecond = fields.Integer()
@marshmallow.post_load
def make_relative_delta(self, data, **kwargs):
"""Create relative delta based on data."""
if "objectType" in data:
del data["objectType"]
return relativedelta.relativedelta(**data)
class CronExpressionSchema(Schema):
"""Cron expression schema."""
objectType = fields.Constant("CronExpression", data_key="__type")
value = fields.String(required=True)
@marshmallow.post_load
def make_cron_expression(self, data, **kwargs):
"""Create cron expression based on data."""
return CronExpression(data["value"])
class ScheduleIntervalSchema(OneOfSchema):
"""
Schedule interval.
It supports the following types:
* TimeDelta
* RelativeDelta
* CronExpression
"""
type_field = "__type"
type_schemas = {
"TimeDelta": TimeDeltaSchema,
"RelativeDelta": RelativeDeltaSchema,
"CronExpression": CronExpressionSchema,
}
def _dump(self, obj, update_fields=True, **kwargs):
if isinstance(obj, str):
obj = CronExpression(obj)
return super()._dump(obj, update_fields=update_fields, **kwargs)
def get_obj_type(self, obj):
"""Select schema based on object type."""
if isinstance(obj, datetime.timedelta):
return "TimeDelta"
elif isinstance(obj, relativedelta.relativedelta):
return "RelativeDelta"
elif isinstance(obj, CronExpression):
return "CronExpression"
else:
raise Exception(f"Unknown object type: {obj.__class__.__name__}")
class ColorField(fields.String):
"""Schema for color property."""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.Regexp("^#[a-fA-F0-9]{3,6}$")] + list(self.validators)
class WeightRuleField(fields.String):
"""Schema for WeightRule."""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(WeightRule.all_weight_rules())] + list(self.validators)
class TimezoneField(fields.String):
"""Schema for timezone."""
class ClassReferenceSchema(Schema):
"""Class reference schema."""
module_path = fields.Method("_get_module", required=True)
class_name = fields.Method("_get_class_name", required=True)
def _get_module(self, obj):
if isinstance(obj, (MappedOperator, SerializedBaseOperator)):
return obj._task_module
return inspect.getmodule(obj).__name__
def _get_class_name(self, obj):
if isinstance(obj, (MappedOperator, SerializedBaseOperator)):
return obj._task_type
if isinstance(obj, type):
return obj.__name__
return type(obj).__name__
class JsonObjectField(fields.Field):
"""JSON object field."""
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return {}
return json.loads(value) if isinstance(value, str) else value
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, str):
return json.loads(value)
return value
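# Hedged usage sketch (editorial addition, not part of the upstream module): the OneOfSchema
# above discriminates on the runtime type, so timedeltas, relativedeltas and cron strings all
# serialize with a "__type" marker. The interval values below are illustrative.
def _example_dump_schedule_intervals() -> list[dict]:
    schema = ScheduleIntervalSchema()
    return [
        schema.dump(datetime.timedelta(days=1)),  # {"__type": "TimeDelta", ...}
        schema.dump(relativedelta.relativedelta(months=1)),  # {"__type": "RelativeDelta", ...}
        schema.dump("0 0 * * *"),  # {"__type": "CronExpression", "value": "0 0 * * *"}
    ]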
| 5,530 | 29.059783 | 97 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/health_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow import Schema, fields
class BaseInfoSchema(Schema):
"""Base status field for metadatabase and scheduler."""
status = fields.String(dump_only=True)
class MetaDatabaseInfoSchema(BaseInfoSchema):
"""Schema for Metadatabase info."""
class SchedulerInfoSchema(BaseInfoSchema):
"""Schema for Scheduler info."""
latest_scheduler_heartbeat = fields.String(dump_only=True)
class TriggererInfoSchema(BaseInfoSchema):
"""Schema for Triggerer info."""
latest_triggerer_heartbeat = fields.String(dump_only=True)
class DagProcessorInfoSchema(BaseInfoSchema):
"""Schema for DagProcessor info."""
latest_dag_processor_heartbeat = fields.String(dump_only=True)
class HealthInfoSchema(Schema):
"""Schema for the Health endpoint."""
metadatabase = fields.Nested(MetaDatabaseInfoSchema)
scheduler = fields.Nested(SchedulerInfoSchema)
triggerer = fields.Nested(TriggererInfoSchema)
dag_processor = fields.Nested(DagProcessorInfoSchema)
health_schema = HealthInfoSchema()
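# Hedged usage sketch (editorial addition, not part of the upstream module): the /health
# payload is a plain nested mapping dumped through the schema; the values are illustrative.
def _example_dump_health() -> dict:
    payload = {
        "metadatabase": {"status": "healthy"},
        "scheduler": {
            "status": "healthy",
            "latest_scheduler_heartbeat": "2023-01-01T00:00:00+00:00",
        },
    }
    return health_schema.dump(payload)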
| 1,863 | 30.066667 | 66 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/dataset_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.api_connexion.schemas.common_schema import JsonObjectField
from airflow.models.dagrun import DagRun
from airflow.models.dataset import (
DagScheduleDatasetReference,
DatasetEvent,
DatasetModel,
TaskOutletDatasetReference,
)
class TaskOutletDatasetReferenceSchema(SQLAlchemySchema):
"""TaskOutletDatasetReference DB schema."""
class Meta:
"""Meta."""
model = TaskOutletDatasetReference
dag_id = auto_field()
task_id = auto_field()
created_at = auto_field()
updated_at = auto_field()
class DagScheduleDatasetReferenceSchema(SQLAlchemySchema):
"""DagScheduleDatasetReference DB schema."""
class Meta:
"""Meta."""
model = DagScheduleDatasetReference
dag_id = auto_field()
created_at = auto_field()
updated_at = auto_field()
class DatasetSchema(SQLAlchemySchema):
"""Dataset DB schema."""
class Meta:
"""Meta."""
model = DatasetModel
id = auto_field()
uri = auto_field()
extra = JsonObjectField()
created_at = auto_field()
updated_at = auto_field()
producing_tasks = fields.List(fields.Nested(TaskOutletDatasetReferenceSchema))
consuming_dags = fields.List(fields.Nested(DagScheduleDatasetReferenceSchema))
class DatasetCollection(NamedTuple):
"""List of Datasets with meta."""
datasets: list[DatasetModel]
total_entries: int
class DatasetCollectionSchema(Schema):
"""Dataset Collection Schema."""
datasets = fields.List(fields.Nested(DatasetSchema))
total_entries = fields.Int()
dataset_schema = DatasetSchema()
dataset_collection_schema = DatasetCollectionSchema()
class BasicDAGRunSchema(SQLAlchemySchema):
"""Basic Schema for DAGRun."""
class Meta:
"""Meta."""
model = DagRun
dateformat = "iso"
run_id = auto_field(data_key="dag_run_id")
dag_id = auto_field(dump_only=True)
execution_date = auto_field(data_key="logical_date", dump_only=True)
start_date = auto_field(dump_only=True)
end_date = auto_field(dump_only=True)
state = auto_field(dump_only=True)
data_interval_start = auto_field(dump_only=True)
data_interval_end = auto_field(dump_only=True)
class DatasetEventSchema(SQLAlchemySchema):
"""Dataset Event DB schema."""
class Meta:
"""Meta."""
model = DatasetEvent
id = auto_field()
dataset_id = auto_field()
dataset_uri = fields.String(attribute="dataset.uri", dump_only=True)
extra = JsonObjectField()
source_task_id = auto_field()
source_dag_id = auto_field()
source_run_id = auto_field()
source_map_index = auto_field()
created_dagruns = fields.List(fields.Nested(BasicDAGRunSchema))
timestamp = auto_field()
class DatasetEventCollection(NamedTuple):
"""List of Dataset events with meta."""
dataset_events: list[DatasetEvent]
total_entries: int
class DatasetEventCollectionSchema(Schema):
"""Dataset Event Collection Schema."""
dataset_events = fields.List(fields.Nested(DatasetEventSchema))
total_entries = fields.Int()
dataset_event_schema = DatasetEventSchema()
dataset_event_collection_schema = DatasetEventCollectionSchema()
| 4,170 | 26.622517 | 82 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/sla_miss_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models import SlaMiss
class SlaMissSchema(SQLAlchemySchema):
"""Sla Miss Schema."""
class Meta:
"""Meta."""
model = SlaMiss
task_id = auto_field(dump_only=True)
dag_id = auto_field(dump_only=True)
execution_date = auto_field(dump_only=True)
email_sent = auto_field(dump_only=True)
timestamp = auto_field(dump_only=True)
description = auto_field(dump_only=True)
notification_sent = auto_field(dump_only=True)
| 1,364 | 34 | 63 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/variable_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow import Schema, fields
class VariableSchema(Schema):
"""Variable Schema."""
key = fields.String(required=True)
value = fields.String(attribute="val", required=True)
description = fields.String(attribute="description", required=False)
class VariableCollectionSchema(Schema):
"""Variable Collection Schema."""
variables = fields.List(fields.Nested(VariableSchema))
total_entries = fields.Int()
variable_schema = VariableSchema()
variable_collection_schema = VariableCollectionSchema()
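# Hedged usage sketch (editorial addition, not part of the upstream module): the schema maps
# the model attribute ``val`` to the API key ``value``; the variable below is illustrative.
def _example_dump_variable() -> dict:
    return variable_schema.dump({"key": "api_base_url", "val": "https://example.com", "description": None})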
| 1,356 | 33.794872 | 72 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/role_and_permission_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.www.fab_security.sqla.models import Action, Permission, Resource, Role
class ActionSchema(SQLAlchemySchema):
"""Action Schema."""
class Meta:
"""Meta."""
model = Action
name = auto_field()
class ResourceSchema(SQLAlchemySchema):
"""View menu Schema."""
class Meta:
"""Meta."""
model = Resource
name = auto_field()
class ActionCollection(NamedTuple):
"""Action Collection."""
actions: list[Action]
total_entries: int
class ActionCollectionSchema(Schema):
"""Permissions list schema."""
actions = fields.List(fields.Nested(ActionSchema))
total_entries = fields.Int()
class ActionResourceSchema(SQLAlchemySchema):
"""Action View Schema."""
class Meta:
"""Meta."""
model = Permission
action = fields.Nested(ActionSchema, data_key="action")
resource = fields.Nested(ResourceSchema, data_key="resource")
class RoleSchema(SQLAlchemySchema):
"""Role item schema."""
class Meta:
"""Meta."""
model = Role
name = auto_field()
permissions = fields.List(fields.Nested(ActionResourceSchema), data_key="actions")
class RoleCollection(NamedTuple):
"""List of roles."""
roles: list[Role]
total_entries: int
class RoleCollectionSchema(Schema):
"""List of roles."""
roles = fields.List(fields.Nested(RoleSchema))
total_entries = fields.Int()
role_schema = RoleSchema()
role_collection_schema = RoleCollectionSchema()
action_collection_schema = ActionCollectionSchema()
| 2,521 | 23.25 | 86 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/xcom_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models import XCom
class XComCollectionItemSchema(SQLAlchemySchema):
"""Schema for a xcom item."""
class Meta:
"""Meta."""
model = XCom
key = auto_field()
timestamp = auto_field()
execution_date = auto_field()
map_index = auto_field()
task_id = auto_field()
dag_id = auto_field()
class XComSchema(XComCollectionItemSchema):
"""XCom schema."""
value = auto_field()
class XComCollection(NamedTuple):
"""List of XComs with meta."""
xcom_entries: list[XCom]
total_entries: int
class XComCollectionSchema(Schema):
"""XCom Collection Schema."""
xcom_entries = fields.List(fields.Nested(XComCollectionItemSchema))
total_entries = fields.Int()
xcom_schema = XComSchema()
xcom_collection_item_schema = XComCollectionItemSchema()
xcom_collection_schema = XComCollectionSchema()
| 1,833 | 26.787879 | 71 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/connection_schema.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.connection import Connection
class ConnectionCollectionItemSchema(SQLAlchemySchema):
"""Schema for a connection item."""
class Meta:
"""Meta."""
model = Connection
connection_id = auto_field("conn_id", required=True)
conn_type = auto_field(required=True)
description = auto_field()
host = auto_field()
login = auto_field()
schema = auto_field()
port = auto_field()
class ConnectionSchema(ConnectionCollectionItemSchema):
"""Connection schema."""
password = auto_field(load_only=True)
extra = fields.Method("serialize_extra", deserialize="deserialize_extra", allow_none=True)
@staticmethod
def serialize_extra(obj: Connection):
if obj.extra is None:
return
from airflow.utils.log.secrets_masker import redact
try:
extra = json.loads(obj.extra)
return json.dumps(redact(extra))
except json.JSONDecodeError:
# we can't redact fields in an unstructured `extra`
return obj.extra
@staticmethod
def deserialize_extra(value): # an explicit deserialize method is required for field.Method
return value
class ConnectionCollection(NamedTuple):
"""List of Connections with meta."""
connections: list[Connection]
total_entries: int
class ConnectionCollectionSchema(Schema):
"""Connection Collection Schema."""
connections = fields.List(fields.Nested(ConnectionCollectionItemSchema))
total_entries = fields.Int()
class ConnectionTestSchema(Schema):
"""connection Test Schema."""
status = fields.Boolean(required=True)
message = fields.String(required=True)
connection_schema = ConnectionSchema()
connection_collection_item_schema = ConnectionCollectionItemSchema()
connection_collection_schema = ConnectionCollectionSchema()
connection_test_schema = ConnectionTestSchema()
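# Hedged usage sketch (editorial addition, not part of the upstream module): dumping a
# Connection never emits the password (it is load_only) and redacts sensitive-looking keys
# inside a JSON ``extra``. The connection values below are illustrative assumptions.
def _example_dump_connection() -> dict:
    conn = Connection(
        conn_id="example_api",
        conn_type="http",
        host="api.example.com",
        password="do-not-log",
        extra=json.dumps({"token": "do-not-log", "timeout": 30}),
    )
    return connection_schema.dump(conn)  # no "password" key; "token" is masked by redact()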
| 2,888 | 29.410526 | 96 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/dag_source_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow import Schema, fields
class DagSourceSchema(Schema):
"""Dag Source schema."""
content = fields.String(dump_only=True)
dag_source_schema = DagSourceSchema()
| 1,007 | 33.758621 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/pool_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.pool import Pool
class PoolSchema(SQLAlchemySchema):
"""Pool schema."""
class Meta:
"""Meta."""
model = Pool
name = auto_field("pool")
slots = auto_field()
occupied_slots = fields.Method("get_occupied_slots", dump_only=True)
running_slots = fields.Method("get_running_slots", dump_only=True)
queued_slots = fields.Method("get_queued_slots", dump_only=True)
scheduled_slots = fields.Method("get_scheduled_slots", dump_only=True)
open_slots = fields.Method("get_open_slots", dump_only=True)
description = auto_field()
@staticmethod
def get_occupied_slots(obj: Pool) -> int:
"""Returns the occupied slots of the pool."""
return obj.occupied_slots()
@staticmethod
def get_running_slots(obj: Pool) -> int:
"""Returns the running slots of the pool."""
return obj.running_slots()
@staticmethod
def get_queued_slots(obj: Pool) -> int:
"""Returns the queued slots of the pool."""
return obj.queued_slots()
@staticmethod
def get_scheduled_slots(obj: Pool) -> int:
"""Returns the scheduled slots of the pool."""
return obj.scheduled_slots()
@staticmethod
def get_open_slots(obj: Pool) -> float:
"""Returns the open slots of the pool."""
return obj.open_slots()
class PoolCollection(NamedTuple):
"""List of Pools with metadata."""
pools: list[Pool]
total_entries: int
class PoolCollectionSchema(Schema):
"""Pool Collection schema."""
pools = fields.List(fields.Nested(PoolSchema))
total_entries = fields.Int()
pool_collection_schema = PoolCollectionSchema()
pool_schema = PoolSchema()
| 2,667 | 30.023256 | 74 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/version_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow import Schema, fields
class VersionInfoSchema(Schema):
"""Version information schema."""
version = fields.String(dump_only=True)
git_version = fields.String(dump_only=True)
version_info_schema = VersionInfoSchema()
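# Hedged usage sketch (editorial addition, not part of the upstream module): the /version
# payload is a dump of two plain strings; the values below are illustrative.
def _example_dump_version() -> dict:
    return version_info_schema.dump({"version": "2.7.0", "git_version": "release:abc1234"})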
| 1,070 | 34.7 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/dag_run_schema.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import NamedTuple
from marshmallow import fields, post_dump, pre_load, validate
from marshmallow.schema import Schema
from marshmallow.validate import Range
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from pendulum.parsing import ParserError
from airflow.api_connexion.exceptions import BadRequest
from airflow.api_connexion.parameters import validate_istimezone
from airflow.api_connexion.schemas.enum_schemas import DagStateField
from airflow.models.dagrun import DagRun
from airflow.utils import timezone
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
class ConfObject(fields.Field):
"""The conf field."""
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return {}
return json.loads(value) if isinstance(value, str) else value
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, str):
return json.loads(value)
return value
_MISSING = object()
class DAGRunSchema(SQLAlchemySchema):
"""Schema for DAGRun."""
class Meta:
"""Meta."""
model = DagRun
dateformat = "iso"
run_id = auto_field(data_key="dag_run_id")
dag_id = auto_field(dump_only=True)
execution_date = auto_field(data_key="logical_date", validate=validate_istimezone)
start_date = auto_field(dump_only=True)
end_date = auto_field(dump_only=True)
state = DagStateField(dump_only=True)
external_trigger = auto_field(dump_default=True, dump_only=True)
conf = ConfObject()
data_interval_start = auto_field(dump_only=True)
data_interval_end = auto_field(dump_only=True)
last_scheduling_decision = auto_field(dump_only=True)
run_type = auto_field(dump_only=True)
note = auto_field(dump_only=False)
@pre_load
def autogenerate(self, data, **kwargs):
"""Auto generate run_id and logical_date if they are not provided.
For compatibility, if `execution_date` is submitted, it is converted
to `logical_date`.
"""
logical_date = data.get("logical_date", _MISSING)
execution_date = data.pop("execution_date", _MISSING)
if logical_date is execution_date is _MISSING: # Both missing.
data["logical_date"] = str(timezone.utcnow())
elif logical_date is _MISSING: # Only logical_date missing.
data["logical_date"] = execution_date
elif execution_date is _MISSING: # Only execution_date missing.
pass
elif logical_date != execution_date: # Both provided but don't match.
raise BadRequest(
"logical_date conflicts with execution_date",
detail=f"{logical_date!r} != {execution_date!r}",
)
if "dag_run_id" not in data:
try:
data["dag_run_id"] = DagRun.generate_run_id(
DagRunType.MANUAL, timezone.parse(data["logical_date"])
)
except (ParserError, TypeError) as err:
raise BadRequest("Incorrect datetime argument", detail=str(err))
return data
@post_dump
def autofill(self, data, **kwargs):
"""Populate execution_date from logical_date for compatibility."""
data["execution_date"] = data["logical_date"]
return data
class SetDagRunStateFormSchema(Schema):
"""Schema for handling the request of setting state of DAG run."""
state = DagStateField(
validate=validate.OneOf(
[DagRunState.SUCCESS.value, DagRunState.FAILED.value, DagRunState.QUEUED.value]
)
)
class ClearDagRunStateFormSchema(Schema):
"""Schema for handling the request of clearing a DAG run."""
dry_run = fields.Boolean(load_default=True)
class DAGRunCollection(NamedTuple):
"""List of DAGRuns with metadata."""
dag_runs: list[DagRun]
total_entries: int
class DAGRunCollectionSchema(Schema):
"""DAGRun Collection schema."""
dag_runs = fields.List(fields.Nested(DAGRunSchema))
total_entries = fields.Int()
class DagRunsBatchFormSchema(Schema):
"""Schema to validate and deserialize the Form(request payload) submitted to DagRun Batch endpoint."""
class Meta:
"""Meta."""
datetimeformat = "iso"
strict = True
order_by = fields.String()
page_offset = fields.Int(load_default=0, validate=Range(min=0))
page_limit = fields.Int(load_default=100, validate=Range(min=1))
dag_ids = fields.List(fields.Str(), load_default=None)
states = fields.List(fields.Str(), load_default=None)
execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
start_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
updated_at_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
updated_at_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
class SetDagRunNoteFormSchema(Schema):
"""Schema for handling the request of clearing a DAG run."""
note = fields.String(allow_none=True, validate=validate.Length(max=1000))
dagrun_schema = DAGRunSchema()
dagrun_collection_schema = DAGRunCollectionSchema()
set_dagrun_state_form_schema = SetDagRunStateFormSchema()
clear_dagrun_form_schema = ClearDagRunStateFormSchema()
dagruns_batch_form_schema = DagRunsBatchFormSchema()
set_dagrun_note_form_schema = SetDagRunNoteFormSchema()
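# Hedged usage sketch (editorial addition, not part of the upstream module): thanks to the
# pre_load hook a minimal POST body is enough, and the legacy key is still accepted. The
# payloads below are illustrative:
#
#   dagrun_schema.load({})                                          # run_id and logical_date autogenerated
#   dagrun_schema.load({"execution_date": "2021-06-01T00:00:00Z"})  # mapped onto logical_date
#   dagrun_schema.load({"logical_date": "2021-06-01T00:00:00Z",
#                       "execution_date": "2021-06-02T00:00:00Z"})  # mismatch -> BadRequest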
| 6,636 | 35.668508 | 106 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/error_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.errors import ImportError
class ImportErrorSchema(SQLAlchemySchema):
"""Import error schema."""
class Meta:
"""Meta."""
model = ImportError
import_error_id = auto_field("id", dump_only=True)
timestamp = auto_field(format="iso")
filename = auto_field()
    stack_trace = auto_field("stacktrace")
class ImportErrorCollection(NamedTuple):
"""List of import errors with metadata."""
import_errors: list[ImportError]
total_entries: int
class ImportErrorCollectionSchema(Schema):
"""Import error collection schema."""
import_errors = fields.List(fields.Nested(ImportErrorSchema))
total_entries = fields.Int()
import_error_schema = ImportErrorSchema()
import_error_collection_schema = ImportErrorCollectionSchema()
| 1,772 | 29.050847 | 65 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/dag_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from itsdangerous import URLSafeSerializer
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow import DAG
from airflow.api_connexion.schemas.common_schema import ScheduleIntervalSchema, TimeDeltaSchema, TimezoneField
from airflow.configuration import conf
from airflow.models.dag import DagModel, DagTag
class DagTagSchema(SQLAlchemySchema):
"""Dag Tag schema."""
class Meta:
"""Meta."""
model = DagTag
name = auto_field()
class DAGSchema(SQLAlchemySchema):
"""DAG schema."""
class Meta:
"""Meta."""
model = DagModel
dag_id = auto_field(dump_only=True)
root_dag_id = auto_field(dump_only=True)
is_paused = auto_field()
is_active = auto_field(dump_only=True)
is_subdag = auto_field(dump_only=True)
last_parsed_time = auto_field(dump_only=True)
last_pickled = auto_field(dump_only=True)
last_expired = auto_field(dump_only=True)
scheduler_lock = auto_field(dump_only=True)
pickle_id = auto_field(dump_only=True)
default_view = auto_field(dump_only=True)
fileloc = auto_field(dump_only=True)
file_token = fields.Method("get_token", dump_only=True)
owners = fields.Method("get_owners", dump_only=True)
description = auto_field(dump_only=True)
schedule_interval = fields.Nested(ScheduleIntervalSchema)
timetable_description = auto_field(dump_only=True)
tags = fields.List(fields.Nested(DagTagSchema), dump_only=True)
max_active_tasks = auto_field(dump_only=True)
max_active_runs = auto_field(dump_only=True)
has_task_concurrency_limits = auto_field(dump_only=True)
has_import_errors = auto_field(dump_only=True)
next_dagrun = auto_field(dump_only=True)
next_dagrun_data_interval_start = auto_field(dump_only=True)
next_dagrun_data_interval_end = auto_field(dump_only=True)
next_dagrun_create_after = auto_field(dump_only=True)
@staticmethod
def get_owners(obj: DagModel):
"""Convert owners attribute to DAG representation."""
if not getattr(obj, "owners", None):
return []
return obj.owners.split(",")
@staticmethod
def get_token(obj: DagModel):
"""Return file token."""
serializer = URLSafeSerializer(conf.get_mandatory_value("webserver", "secret_key"))
return serializer.dumps(obj.fileloc)
class DAGDetailSchema(DAGSchema):
"""DAG details."""
owners = fields.Method("get_owners", dump_only=True)
timezone = TimezoneField()
catchup = fields.Boolean()
orientation = fields.String()
concurrency = fields.Method("get_concurrency") # TODO: Remove in Airflow 3.0
max_active_tasks = fields.Integer()
start_date = fields.DateTime()
dag_run_timeout = fields.Nested(TimeDeltaSchema, attribute="dagrun_timeout")
doc_md = fields.String()
default_view = fields.String()
params = fields.Method("get_params", dump_only=True)
tags = fields.Method("get_tags", dump_only=True) # type: ignore
is_paused = fields.Method("get_is_paused", dump_only=True)
is_active = fields.Method("get_is_active", dump_only=True)
is_paused_upon_creation = fields.Boolean()
end_date = fields.DateTime(dump_only=True)
template_search_path = fields.String(dump_only=True)
render_template_as_native_obj = fields.Boolean(dump_only=True)
last_loaded = fields.DateTime(dump_only=True, data_key="last_parsed")
@staticmethod
def get_concurrency(obj: DAG):
return obj.max_active_tasks
@staticmethod
def get_tags(obj: DAG):
"""Dumps tags as objects."""
tags = obj.tags
if tags:
return [DagTagSchema().dump(dict(name=tag)) for tag in tags]
return []
@staticmethod
def get_owners(obj: DAG):
"""Convert owners attribute to DAG representation."""
if not getattr(obj, "owner", None):
return []
return obj.owner.split(",")
@staticmethod
def get_is_paused(obj: DAG):
"""Checks entry in DAG table to see if this DAG is paused."""
return obj.get_is_paused()
@staticmethod
def get_is_active(obj: DAG):
"""Checks entry in DAG table to see if this DAG is active."""
return obj.get_is_active()
@staticmethod
def get_params(obj: DAG):
"""Get the Params defined in a DAG."""
params = obj.params
return {k: v.dump() for k, v in params.items()}
class DAGCollection(NamedTuple):
"""List of DAGs with metadata."""
dags: list[DagModel]
total_entries: int
class DAGCollectionSchema(Schema):
"""DAG Collection schema."""
dags = fields.List(fields.Nested(DAGSchema))
total_entries = fields.Int()
dags_collection_schema = DAGCollectionSchema()
dag_schema = DAGSchema()
dag_detail_schema = DAGDetailSchema()
| 5,708 | 32.982143 | 110 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/trigger_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models import Trigger
class TriggerSchema(SQLAlchemySchema):
"""Sla Miss Schema."""
class Meta:
"""Meta."""
model = Trigger
id = auto_field(dump_only=True)
classpath = auto_field(dump_only=True)
kwargs = auto_field(dump_only=True)
created_date = auto_field(dump_only=True)
triggerer_id = auto_field(dump_only=True)
| 1,264 | 32.289474 | 63 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/enum_schemas.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow import fields, validate
from airflow.utils.state import State
class DagStateField(fields.String):
"""Schema for DagState Enum."""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(State.dag_states)] + list(self.validators)
class TaskInstanceStateField(fields.String):
"""Schema for TaskInstanceState Enum."""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(State.task_states)] + list(self.validators)
| 1,386 | 35.5 | 85 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/task_instance_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, ValidationError, fields, validate, validates_schema
from marshmallow.utils import get_value
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.api_connexion.parameters import validate_istimezone
from airflow.api_connexion.schemas.common_schema import JsonObjectField
from airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField
from airflow.api_connexion.schemas.job_schema import JobSchema
from airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema
from airflow.api_connexion.schemas.trigger_schema import TriggerSchema
from airflow.models import SlaMiss, TaskInstance
from airflow.utils.helpers import exactly_one
from airflow.utils.state import TaskInstanceState
class TaskInstanceSchema(SQLAlchemySchema):
"""Task instance schema."""
class Meta:
"""Meta."""
model = TaskInstance
task_id = auto_field()
dag_id = auto_field()
run_id = auto_field(data_key="dag_run_id")
map_index = auto_field()
execution_date = auto_field()
start_date = auto_field()
end_date = auto_field()
duration = auto_field()
state = TaskInstanceStateField()
_try_number = auto_field(data_key="try_number")
max_tries = auto_field()
hostname = auto_field()
unixname = auto_field()
pool = auto_field()
pool_slots = auto_field()
queue = auto_field()
priority_weight = auto_field()
operator = auto_field()
queued_dttm = auto_field(data_key="queued_when")
pid = auto_field()
executor_config = auto_field()
note = auto_field()
sla_miss = fields.Nested(SlaMissSchema, dump_default=None)
rendered_fields = JsonObjectField(dump_default={})
trigger = fields.Nested(TriggerSchema)
triggerer_job = fields.Nested(JobSchema)
def get_attribute(self, obj, attr, default):
if attr == "sla_miss":
# Object is a tuple of task_instance and slamiss
# and the get_value expects a dict with key, value
# corresponding to the attr.
slamiss_instance = {"sla_miss": obj[1]}
return get_value(slamiss_instance, attr, default)
elif attr == "rendered_fields":
return get_value(obj[0], "rendered_task_instance_fields.rendered_fields", default)
return get_value(obj[0], attr, default)
class TaskInstanceCollection(NamedTuple):
"""List of task instances with metadata."""
task_instances: list[tuple[TaskInstance, SlaMiss | None]]
total_entries: int
class TaskInstanceCollectionSchema(Schema):
"""Task instance collection schema."""
task_instances = fields.List(fields.Nested(TaskInstanceSchema))
total_entries = fields.Int()
class TaskInstanceBatchFormSchema(Schema):
"""Schema for the request form passed to Task Instance Batch endpoint."""
page_offset = fields.Int(load_default=0, validate=validate.Range(min=0))
page_limit = fields.Int(load_default=100, validate=validate.Range(min=1))
dag_ids = fields.List(fields.Str(), load_default=None)
execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
start_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
duration_gte = fields.Int(load_default=None)
duration_lte = fields.Int(load_default=None)
state = fields.List(fields.Str(), load_default=None)
pool = fields.List(fields.Str(), load_default=None)
queue = fields.List(fields.Str(), load_default=None)
class ClearTaskInstanceFormSchema(Schema):
"""Schema for handling the request of clearing task instance of a Dag."""
dry_run = fields.Boolean(load_default=True)
start_date = fields.DateTime(load_default=None, validate=validate_istimezone)
end_date = fields.DateTime(load_default=None, validate=validate_istimezone)
only_failed = fields.Boolean(load_default=True)
only_running = fields.Boolean(load_default=False)
include_subdags = fields.Boolean(load_default=False)
include_parentdag = fields.Boolean(load_default=False)
reset_dag_runs = fields.Boolean(load_default=False)
task_ids = fields.List(fields.String(), validate=validate.Length(min=1))
dag_run_id = fields.Str(load_default=None)
include_upstream = fields.Boolean(load_default=False)
include_downstream = fields.Boolean(load_default=False)
include_future = fields.Boolean(load_default=False)
include_past = fields.Boolean(load_default=False)
@validates_schema
def validate_form(self, data, **kwargs):
"""Validates clear task instance form."""
if data["only_failed"] and data["only_running"]:
raise ValidationError("only_failed and only_running both are set to True")
if data["start_date"] and data["end_date"]:
if data["start_date"] > data["end_date"]:
raise ValidationError("end_date is sooner than start_date")
if data["start_date"] and data["end_date"] and data["dag_run_id"]:
raise ValidationError("Exactly one of dag_run_id or (start_date and end_date) must be provided")
if data["start_date"] and data["dag_run_id"]:
raise ValidationError("Exactly one of dag_run_id or start_date must be provided")
if data["end_date"] and data["dag_run_id"]:
raise ValidationError("Exactly one of dag_run_id or end_date must be provided")
class SetTaskInstanceStateFormSchema(Schema):
"""Schema for handling the request of setting state of task instance of a DAG."""
dry_run = fields.Boolean(dump_default=True)
task_id = fields.Str(required=True)
execution_date = fields.DateTime(validate=validate_istimezone)
dag_run_id = fields.Str()
include_upstream = fields.Boolean(required=True)
include_downstream = fields.Boolean(required=True)
include_future = fields.Boolean(required=True)
include_past = fields.Boolean(required=True)
new_state = TaskInstanceStateField(
required=True,
validate=validate.OneOf(
[TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]
),
)
@validates_schema
def validate_form(self, data, **kwargs):
"""Validates set task instance state form."""
if not exactly_one(data.get("execution_date"), data.get("dag_run_id")):
raise ValidationError("Exactly one of execution_date or dag_run_id must be provided")
class SetSingleTaskInstanceStateFormSchema(Schema):
"""Schema for handling the request of updating state of a single task instance."""
dry_run = fields.Boolean(dump_default=True)
new_state = TaskInstanceStateField(
required=True,
validate=validate.OneOf(
[TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]
),
)
class TaskInstanceReferenceSchema(Schema):
"""Schema for the task instance reference schema."""
task_id = fields.Str()
run_id = fields.Str(data_key="dag_run_id")
dag_id = fields.Str()
execution_date = fields.DateTime()
class TaskInstanceReferenceCollection(NamedTuple):
"""List of objects with metadata about taskinstance and dag_run_id."""
task_instances: list[tuple[TaskInstance, str]]
class TaskInstanceReferenceCollectionSchema(Schema):
"""Collection schema for task reference."""
task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema))
class SetTaskInstanceNoteFormSchema(Schema):
"""Schema for settings a note for a TaskInstance."""
# Note: We can't add map_index to the url as subpaths can't start with dashes.
map_index = fields.Int(allow_none=False)
note = fields.String(allow_none=True, validate=validate.Length(max=1000))
task_instance_schema = TaskInstanceSchema()
task_instance_collection_schema = TaskInstanceCollectionSchema()
task_instance_batch_form = TaskInstanceBatchFormSchema()
clear_task_instance_form = ClearTaskInstanceFormSchema()
set_task_instance_state_form = SetTaskInstanceStateFormSchema()
set_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema()
task_instance_reference_schema = TaskInstanceReferenceSchema()
task_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema()
set_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema()
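A minimal, illustrative sketch of loading a payload through the clear-task-instances form schema above. It assumes the module path airflow.api_connexion.schemas.task_instance_schema (not shown in this excerpt), and the payload values are invented.
from marshmallow import ValidationError
from airflow.api_connexion.schemas.task_instance_schema import clear_task_instance_form
# Conflicting flags trip the @validates_schema hook and surface as a schema-level error.
try:
    clear_task_instance_form.load({"only_failed": True, "only_running": True})
except ValidationError as err:
    print(err.messages)  # {'_schema': ['only_failed and only_running both are set to True']}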
| 9,507 | 41.257778 | 108 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/provider_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from airflow.typing_compat import TypedDict
class ProviderSchema(Schema):
"""Provider schema."""
package_name = fields.String(required=True)
description = fields.String(required=True)
version = fields.String(required=True)
class Provider(TypedDict):
"""A provider."""
package_name: str
description: str
version: str
class ProviderCollection(NamedTuple):
"""List of Providers."""
providers: list[Provider]
total_entries: int
class ProviderCollectionSchema(Schema):
"""Provider Collection schema."""
providers = fields.List(fields.Nested(ProviderSchema))
total_entries = fields.Int()
provider_collection_schema = ProviderCollectionSchema()
provider_schema = ProviderSchema()
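A small usage sketch for the schemas above; the provider entry is invented (in the API this data comes from ProvidersManager), so treat it as illustrative only.
from airflow.api_connexion.schemas.provider_schema import (
    Provider,
    ProviderCollection,
    provider_collection_schema,
)
providers = [
    Provider(
        package_name="apache-airflow-providers-example",  # hypothetical package name
        description="Example provider entry",
        version="1.0.0",
    )
]
print(provider_collection_schema.dump(ProviderCollection(providers=providers, total_entries=len(providers))))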
| 1,635 | 27.206897 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/log_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
class LogsSchema(Schema):
"""Schema for logs."""
content = fields.Str()
continuation_token = fields.Str()
class LogResponseObject(NamedTuple):
"""Log Response Object."""
content: str
continuation_token: str | None
logs_schema = LogsSchema()
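A short sketch of dumping a LogResponseObject with the schema above; the log content is a placeholder, and continuation_token is None to mimic the end of a stream.
from airflow.api_connexion.schemas.log_schema import LogResponseObject, logs_schema
print(logs_schema.dump(LogResponseObject(content="*** example log line", continuation_token=None)))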
| 1,164 | 28.871795 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/task_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from airflow.api_connexion.schemas.common_schema import (
ClassReferenceSchema,
ColorField,
TimeDeltaSchema,
WeightRuleField,
)
from airflow.api_connexion.schemas.dag_schema import DAGSchema
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
class TaskSchema(Schema):
"""Task schema."""
class_ref = fields.Method("_get_class_reference", dump_only=True)
operator_name = fields.Method("_get_operator_name", dump_only=True)
task_id = fields.String(dump_only=True)
owner = fields.String(dump_only=True)
start_date = fields.DateTime(dump_only=True)
end_date = fields.DateTime(dump_only=True)
trigger_rule = fields.String(dump_only=True)
extra_links = fields.List(
fields.Nested(ClassReferenceSchema), dump_only=True, attribute="operator_extra_links"
)
depends_on_past = fields.Boolean(dump_only=True)
wait_for_downstream = fields.Boolean(dump_only=True)
retries = fields.Number(dump_only=True)
queue = fields.String(dump_only=True)
pool = fields.String(dump_only=True)
pool_slots = fields.Number(dump_only=True)
execution_timeout = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_delay = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_exponential_backoff = fields.Boolean(dump_only=True)
priority_weight = fields.Number(dump_only=True)
weight_rule = WeightRuleField(dump_only=True)
ui_color = ColorField(dump_only=True)
ui_fgcolor = ColorField(dump_only=True)
template_fields = fields.List(fields.String(), dump_only=True)
sub_dag = fields.Nested(DAGSchema, dump_only=True)
downstream_task_ids = fields.List(fields.String(), dump_only=True)
params = fields.Method("_get_params", dump_only=True)
is_mapped = fields.Method("_get_is_mapped", dump_only=True)
@staticmethod
def _get_class_reference(obj):
result = ClassReferenceSchema().dump(obj)
return result.data if hasattr(result, "data") else result
@staticmethod
def _get_operator_name(obj):
return obj.operator_name
@staticmethod
def _get_params(obj):
"""Get the Params defined in a Task."""
params = obj.params
return {k: v.dump() for k, v in params.items()}
@staticmethod
def _get_is_mapped(obj):
return isinstance(obj, MappedOperator)
class TaskCollection(NamedTuple):
"""List of Tasks with metadata."""
tasks: list[Operator]
total_entries: int
class TaskCollectionSchema(Schema):
"""Schema for TaskCollection."""
tasks = fields.List(fields.Nested(TaskSchema))
total_entries = fields.Int()
task_schema = TaskSchema()
task_collection_schema = TaskCollectionSchema()
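A sketch of serializing a single operator through TaskSchema; the DAG and task ids are invented, and EmptyOperator merely stands in for any operator, so the exact output depends on operator defaults.
import datetime
from airflow import DAG
from airflow.api_connexion.schemas.task_schema import task_schema
from airflow.operators.empty import EmptyOperator
with DAG(dag_id="example_dag", start_date=datetime.datetime(2021, 1, 1)):
    task = EmptyOperator(task_id="example_task")
print(task_schema.dump(task))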
| 3,632 | 34.617647 | 93 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/job_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.jobs.job import Job
class JobSchema(SQLAlchemySchema):
"""Sla Miss Schema."""
class Meta:
"""Meta."""
model = Job
id = auto_field(dump_only=True)
dag_id = auto_field(dump_only=True)
state = auto_field(dump_only=True)
job_type = auto_field(dump_only=True)
start_date = auto_field(dump_only=True)
end_date = auto_field(dump_only=True)
latest_heartbeat = auto_field(dump_only=True)
executor_class = auto_field(dump_only=True)
hostname = auto_field(dump_only=True)
unixname = auto_field(dump_only=True)
| 1,468 | 33.162791 | 63 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/plugin_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
class PluginSchema(Schema):
"""Plugin schema."""
name = fields.String()
hooks = fields.List(fields.String())
executors = fields.List(fields.String())
macros = fields.List(fields.Dict())
flask_blueprints = fields.List(fields.Dict())
appbuilder_views = fields.List(fields.Dict())
appbuilder_menu_items = fields.List(fields.Dict())
global_operator_extra_links = fields.List(fields.Dict())
operator_extra_links = fields.List(fields.Dict())
source = fields.String()
class PluginCollection(NamedTuple):
"""Plugin List."""
plugins: list
total_entries: int
class PluginCollectionSchema(Schema):
"""Plugin Collection List."""
plugins = fields.List(fields.Nested(PluginSchema))
total_entries = fields.Int()
plugin_schema = PluginSchema()
plugin_collection_schema = PluginCollectionSchema()
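An illustrative dump of a PluginCollection; the plugin entry is a plain dict with invented values, whereas the real endpoint builds this data from the plugins manager.
from airflow.api_connexion.schemas.plugin_schema import PluginCollection, plugin_collection_schema
plugin = {"name": "example_plugin", "hooks": ["ExampleHook"], "source": "example_plugin.py"}
print(plugin_collection_schema.dump(PluginCollection(plugins=[plugin], total_entries=1)))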
| 1,750 | 30.836364 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/config_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
class ConfigOptionSchema(Schema):
"""Config Option Schema."""
key = fields.String(required=True)
value = fields.String(required=True)
class ConfigOption(NamedTuple):
"""Config option."""
key: str
value: str
class ConfigSectionSchema(Schema):
"""Config Section Schema."""
name = fields.String(required=True)
options = fields.List(fields.Nested(ConfigOptionSchema))
class ConfigSection(NamedTuple):
"""List of config options within a section."""
name: str
options: list[ConfigOption]
class ConfigSchema(Schema):
"""Config Schema."""
sections = fields.List(fields.Nested(ConfigSectionSchema))
class Config(NamedTuple):
"""List of config sections with their options."""
sections: list[ConfigSection]
config_schema = ConfigSchema()
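A minimal sketch showing the nested shape produced by ConfigSchema; the section and option values are invented.
from airflow.api_connexion.schemas.config_schema import (
    Config,
    ConfigOption,
    ConfigSection,
    config_schema,
)
section = ConfigSection(name="core", options=[ConfigOption(key="dags_folder", value="/files/dags")])
print(config_schema.dump(Config(sections=[section])))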
| 1,702 | 25.2 | 62 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/dag_warning_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.dagwarning import DagWarning
class DagWarningSchema(SQLAlchemySchema):
"""Import error schema."""
class Meta:
"""Meta."""
model = DagWarning
dag_id = auto_field(data_key="dag_id", dump_only=True)
warning_type = auto_field()
message = auto_field()
timestamp = auto_field(format="iso")
class DagWarningCollection(NamedTuple):
"""List of dag warnings with metadata."""
dag_warnings: list[DagWarning]
total_entries: int
class DagWarningCollectionSchema(Schema):
"""Import error collection schema."""
dag_warnings = fields.List(fields.Nested(DagWarningSchema))
total_entries = fields.Int()
dag_warning_schema = DagWarningSchema()
dag_warning_collection_schema = DagWarningCollectionSchema()
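A hedged sketch of the serialized shape of DagWarningSchema: it dumps a plain dict carrying the same attribute names as the DagWarning model rather than a real database row, and every value is invented.
import datetime
from airflow.api_connexion.schemas.dag_warning_schema import dag_warning_schema
warning = {
    "dag_id": "example_dag",
    "warning_type": "non-existent pool",
    "message": "Example warning message.",
    "timestamp": datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc),
}
print(dag_warning_schema.dump(warning))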
| 1,739 | 29.526316 | 63 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/user_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.api_connexion.parameters import validate_istimezone
from airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema
from airflow.www.fab_security.sqla.models import User
class UserCollectionItemSchema(SQLAlchemySchema):
"""user collection item schema."""
class Meta:
"""Meta."""
model = User
dateformat = "iso"
first_name = auto_field()
last_name = auto_field()
username = auto_field()
active = auto_field(dump_only=True)
email = auto_field()
last_login = auto_field(dump_only=True)
login_count = auto_field(dump_only=True)
fail_login_count = auto_field(dump_only=True)
roles = fields.List(fields.Nested(RoleSchema, only=("name",)))
created_on = auto_field(validate=validate_istimezone, dump_only=True)
changed_on = auto_field(validate=validate_istimezone, dump_only=True)
class UserSchema(UserCollectionItemSchema):
"""User schema."""
password = auto_field(load_only=True)
class UserCollection(NamedTuple):
"""User collection."""
users: list[User]
total_entries: int
class UserCollectionSchema(Schema):
"""User collection schema."""
users = fields.List(fields.Nested(UserCollectionItemSchema))
total_entries = fields.Int()
user_collection_item_schema = UserCollectionItemSchema()
user_schema = UserSchema()
user_collection_schema = UserCollectionSchema()
| 2,365 | 30.972973 | 79 |
py
|
airflow
|
airflow-main/airflow/api_connexion/schemas/event_log_schema.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.log import Log
class EventLogSchema(SQLAlchemySchema):
"""Event log schema."""
class Meta:
"""Meta."""
model = Log
id = auto_field(data_key="event_log_id", dump_only=True)
dttm = auto_field(data_key="when", dump_only=True)
dag_id = auto_field(dump_only=True)
task_id = auto_field(dump_only=True)
event = auto_field(dump_only=True)
execution_date = auto_field(dump_only=True)
owner = auto_field(dump_only=True)
extra = auto_field(dump_only=True)
class EventLogCollection(NamedTuple):
"""List of import errors with metadata."""
event_logs: list[Log]
total_entries: int
class EventLogCollectionSchema(Schema):
"""EventLog Collection Schema."""
event_logs = fields.List(fields.Nested(EventLogSchema))
total_entries = fields.Int()
event_log_schema = EventLogSchema()
event_log_collection_schema = EventLogCollectionSchema()
| 1,888 | 29.967213 | 63 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/provider_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re2
from airflow.api_connexion import security
from airflow.api_connexion.schemas.provider_schema import (
Provider,
ProviderCollection,
provider_collection_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.providers_manager import ProviderInfo, ProvidersManager
from airflow.security import permissions
def _remove_rst_syntax(value: str) -> str:
return re2.sub("[`_<>]", "", value.strip(" \n."))
def _provider_mapper(provider: ProviderInfo) -> Provider:
return Provider(
package_name=provider.data["package-name"],
description=_remove_rst_syntax(provider.data["description"]),
version=provider.version,
)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER)])
def get_providers() -> APIResponse:
"""Get providers."""
providers = [_provider_mapper(d) for d in ProvidersManager().providers.values()]
total_entries = len(providers)
return provider_collection_schema.dump(
ProviderCollection(providers=providers, total_entries=total_entries)
)
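A quick, illustrative check of the _remove_rst_syntax helper above; the input string is made up.
from airflow.api_connexion.endpoints.provider_endpoint import _remove_rst_syntax
# Backticks, underscores and angle brackets are removed after trimming spaces, dots and newlines.
print(_remove_rst_syntax("`Amazon <https://aws.amazon.com/>`__ integration.\n"))
# -> Amazon https://aws.amazon.com/ integration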
| 1,912 | 35.788462 | 89 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/dag_warning_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.dag_warning_schema import (
DagWarningCollection,
dag_warning_collection_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models.dagwarning import DagWarning as DagWarningModel
from airflow.security import permissions
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING)])
@format_parameters({"limit": check_limit})
@provide_session
def get_dag_warnings(
*,
limit: int,
dag_id: str | None = None,
warning_type: str | None = None,
offset: int | None = None,
order_by: str = "timestamp",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get DAG warnings.
:param dag_id: the dag_id to optionally filter by
:param warning_type: the warning type to optionally filter by
"""
allowed_filter_attrs = ["dag_id", "warning_type", "message", "timestamp"]
query = select(DagWarningModel)
if dag_id:
query = query.where(DagWarningModel.dag_id == dag_id)
if warning_type:
query = query.where(DagWarningModel.warning_type == warning_type)
total_entries = session.scalar(select(func.count()).select_from(query))
query = apply_sorting(query=query, order_by=order_by, allowed_attrs=allowed_filter_attrs)
dag_warnings = session.scalars(query.offset(offset).limit(limit)).all()
return dag_warning_collection_schema.dump(
DagWarningCollection(dag_warnings=dag_warnings, total_entries=total_entries)
)
| 2,587 | 40.079365 | 93 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/role_and_permission_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from connexion import NoContent
from flask import request
from marshmallow import ValidationError
from sqlalchemy import asc, desc, func, select
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import check_limit, format_parameters
from airflow.api_connexion.schemas.role_and_permission_schema import (
ActionCollection,
RoleCollection,
action_collection_schema,
role_collection_schema,
role_schema,
)
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.www.fab_security.sqla.models import Action, Role
from airflow.www.security import AirflowSecurityManager
def _check_action_and_resource(sm: AirflowSecurityManager, perms: list[tuple[str, str]]) -> None:
"""
Check that the given actions and resources exist, raising 400 otherwise.
This function is intended for use in the REST API because it raises 400 Bad Request.
"""
for action, resource in perms:
if not sm.get_action(action):
raise BadRequest(detail=f"The specified action: {action!r} was not found")
if not sm.get_resource(resource):
raise BadRequest(detail=f"The specified resource: {resource!r} was not found")
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE)])
def get_role(*, role_name: str) -> APIResponse:
"""Get role."""
ab_security_manager = get_airflow_app().appbuilder.sm
role = ab_security_manager.find_role(name=role_name)
if not role:
raise NotFound(title="Role not found", detail=f"Role with name {role_name!r} was not found")
return role_schema.dump(role)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE)])
@format_parameters({"limit": check_limit})
def get_roles(*, order_by: str = "name", limit: int, offset: int | None = None) -> APIResponse:
"""Get roles."""
appbuilder = get_airflow_app().appbuilder
session = appbuilder.get_session
total_entries = session.scalars(select(func.count(Role.id))).one()
direction = desc if order_by.startswith("-") else asc
to_replace = {"role_id": "id"}
order_param = order_by.strip("-")
order_param = to_replace.get(order_param, order_param)
allowed_filter_attrs = ["role_id", "name"]
if order_by not in allowed_filter_attrs:
raise BadRequest(
detail=f"Ordering with '{order_by}' is disallowed or "
f"the attribute does not exist on the model"
)
query = select(Role)
roles = (
session.scalars(query.order_by(direction(getattr(Role, order_param))).offset(offset).limit(limit))
.unique()
.all()
)
return role_collection_schema.dump(RoleCollection(roles=roles, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_ACTION)])
@format_parameters({"limit": check_limit})
def get_permissions(*, limit: int, offset: int | None = None) -> APIResponse:
"""Get permissions."""
session = get_airflow_app().appbuilder.get_session
total_entries = session.scalars(select(func.count(Action.id))).one()
query = select(Action)
actions = session.scalars(query.offset(offset).limit(limit)).all()
return action_collection_schema.dump(ActionCollection(actions=actions, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_ROLE)])
def delete_role(*, role_name: str) -> APIResponse:
"""Delete a role."""
ab_security_manager = get_airflow_app().appbuilder.sm
role = ab_security_manager.find_role(name=role_name)
if not role:
raise NotFound(title="Role not found", detail=f"Role with name {role_name!r} was not found")
ab_security_manager.delete_role(role_name=role_name)
return NoContent, HTTPStatus.NO_CONTENT
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE)])
def patch_role(*, role_name: str, update_mask: UpdateMask = None) -> APIResponse:
"""Update a role."""
appbuilder = get_airflow_app().appbuilder
security_manager = appbuilder.sm
body = request.json
try:
data = role_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
role = security_manager.find_role(name=role_name)
if not role:
raise NotFound(title="Role not found", detail=f"Role with name {role_name!r} was not found")
if update_mask:
update_mask = [i.strip() for i in update_mask]
data_ = {}
for field in update_mask:
if field in data and not field == "permissions":
data_[field] = data[field]
elif field == "actions":
data_["permissions"] = data["permissions"]
else:
raise BadRequest(detail=f"'{field}' in update_mask is unknown")
data = data_
if "permissions" in data:
perms = [(item["action"]["name"], item["resource"]["name"]) for item in data["permissions"] if item]
_check_action_and_resource(security_manager, perms)
security_manager.bulk_sync_roles([{"role": role_name, "perms": perms}])
new_name = data.get("name")
if new_name is not None and new_name != role.name:
security_manager.update_role(role_id=role.id, name=new_name)
return role_schema.dump(role)
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_ROLE)])
def post_role() -> APIResponse:
"""Create a new role."""
appbuilder = get_airflow_app().appbuilder
security_manager = appbuilder.sm
body = request.json
try:
data = role_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
role = security_manager.find_role(name=data["name"])
if not role:
perms = [(item["action"]["name"], item["resource"]["name"]) for item in data["permissions"] if item]
_check_action_and_resource(security_manager, perms)
security_manager.bulk_sync_roles([{"role": data["name"], "perms": perms}])
return role_schema.dump(role)
detail = f"Role with name {role.name!r} already exists; please update with the PATCH endpoint"
raise AlreadyExists(detail=detail)
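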
| 7,259 | 42.214286 | 108 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/log_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from flask import Response, request
from itsdangerous.exc import BadSignature
from itsdangerous.url_safe import URLSafeSerializer
from sqlalchemy import select
from sqlalchemy.orm import Session, joinedload
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.schemas.log_schema import LogResponseObject, logs_schema
from airflow.api_connexion.types import APIResponse
from airflow.exceptions import TaskNotFound
from airflow.models import TaskInstance, Trigger
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@provide_session
def get_log(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
task_try_number: int,
full_content: bool = False,
map_index: int = -1,
token: str | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get logs for specific task instance."""
key = get_airflow_app().config["SECRET_KEY"]
if not token:
metadata = {}
else:
try:
metadata = URLSafeSerializer(key).loads(token)
except BadSignature:
raise BadRequest("Bad Signature. Please use only the tokens provided by the API.")
if metadata.get("download_logs") and metadata["download_logs"]:
full_content = True
if full_content:
metadata["download_logs"] = True
else:
metadata["download_logs"] = False
task_log_reader = TaskLogReader()
if not task_log_reader.supports_read:
raise BadRequest("Task log handler does not support read logs.")
query = (
select(TaskInstance)
.where(
TaskInstance.task_id == task_id,
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == dag_run_id,
TaskInstance.map_index == map_index,
)
.join(TaskInstance.dag_run)
.options(joinedload(TaskInstance.trigger).joinedload(Trigger.triggerer_job))
)
ti = session.scalar(query)
if ti is None:
metadata["end_of_log"] = True
raise NotFound(title="TaskInstance not found")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
if dag:
try:
ti.task = dag.get_task(ti.task_id)
except TaskNotFound:
pass
return_type = request.accept_mimetypes.best_match(["text/plain", "application/json"])
# return_type is one of the two types above, or None
logs: Any
if return_type == "application/json" or return_type is None: # default
logs, metadata = task_log_reader.read_log_chunks(ti, task_try_number, metadata)
logs = logs[0] if task_try_number is not None else logs
# a token is always produced here, so the type: ignore on the assignment is safe
token = URLSafeSerializer(key).dumps(metadata) # type: ignore[assignment]
return logs_schema.dump(LogResponseObject(continuation_token=token, content=logs))
# text/plain. Stream
logs = task_log_reader.read_log_stream(ti, task_try_number, metadata)
return Response(logs, headers={"Content-Type": return_type})
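A small sketch of the continuation-token round trip used in get_log; the secret key here is a stand-in, not Airflow's real SECRET_KEY.
from itsdangerous.url_safe import URLSafeSerializer
serializer = URLSafeSerializer("hypothetical-secret-key")
token = serializer.dumps({"download_logs": False})
print(serializer.loads(token))  # {'download_logs': False}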
| 4,317 | 35.905983 | 94 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/xcom_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import copy
from flask import g
from sqlalchemy import and_, func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.parameters import check_limit, format_parameters
from airflow.api_connexion.schemas.xcom_schema import XComCollection, xcom_collection_schema, xcom_schema
from airflow.api_connexion.types import APIResponse
from airflow.models import DagRun as DR, XCom
from airflow.security import permissions
from airflow.settings import conf
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
],
)
@format_parameters({"limit": check_limit})
@provide_session
def get_xcom_entries(
*,
dag_id: str,
dag_run_id: str,
task_id: str,
map_index: int | None = None,
xcom_key: str | None = None,
limit: int | None,
offset: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all XCom values."""
query = select(XCom)
if dag_id == "~":
appbuilder = get_airflow_app().appbuilder
readable_dag_ids = appbuilder.sm.get_readable_dag_ids(g.user)
query = query.where(XCom.dag_id.in_(readable_dag_ids))
query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id))
else:
query = query.where(XCom.dag_id == dag_id)
query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id))
if task_id != "~":
query = query.where(XCom.task_id == task_id)
if dag_run_id != "~":
query = query.where(DR.run_id == dag_run_id)
if map_index is not None:
query = query.where(XCom.map_index == map_index)
if xcom_key is not None:
query = query.where(XCom.key == xcom_key)
query = query.order_by(DR.execution_date, XCom.task_id, XCom.dag_id, XCom.key)
total_entries = session.execute(select(func.count()).select_from(query)).scalar()
query = session.scalars(query.offset(offset).limit(limit))
return xcom_collection_schema.dump(XComCollection(xcom_entries=query, total_entries=total_entries))
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
],
)
@provide_session
def get_xcom_entry(
*,
dag_id: str,
task_id: str,
dag_run_id: str,
xcom_key: str,
map_index: int = -1,
deserialize: bool = False,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get an XCom entry."""
if deserialize:
if not conf.getboolean("api", "enable_xcom_deserialize_support", fallback=False):
raise BadRequest(detail="XCom deserialization is disabled in configuration.")
query = select(XCom, XCom.value)
else:
query = select(XCom)
query = query.where(
XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.key == xcom_key, XCom.map_index == map_index
)
query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.run_id == DR.run_id))
query = query.where(DR.run_id == dag_run_id)
if deserialize:
item = session.execute(query).one_or_none()
else:
item = session.scalars(query).one_or_none()
if item is None:
raise NotFound("XCom entry not found")
if deserialize:
xcom, value = item
stub = copy.copy(xcom)
stub.value = value
stub.value = XCom.deserialize_value(stub)
item = stub
return xcom_schema.dump(item)
| 4,860 | 35.825758 | 105 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/import_error_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.error_schema import (
ImportErrorCollection,
import_error_collection_schema,
import_error_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models.errors import ImportError as ImportErrorModel
from airflow.security import permissions
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR)])
@provide_session
def get_import_error(*, import_error_id: int, session: Session = NEW_SESSION) -> APIResponse:
"""Get an import error."""
error = session.get(ImportErrorModel, import_error_id)
if error is None:
raise NotFound(
"Import error not found",
detail=f"The ImportError with import_error_id: `{import_error_id}` was not found",
)
return import_error_schema.dump(error)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR)])
@format_parameters({"limit": check_limit})
@provide_session
def get_import_errors(
*,
limit: int,
offset: int | None = None,
order_by: str = "import_error_id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all import errors."""
to_replace = {"import_error_id": "id"}
allowed_filter_attrs = ["import_error_id", "timestamp", "filename"]
total_entries = session.scalars(func.count(ImportErrorModel.id)).one()
query = select(ImportErrorModel)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
import_errors = session.scalars(query.offset(offset).limit(limit)).all()
return import_error_collection_schema.dump(
ImportErrorCollection(import_errors=import_errors, total_entries=total_entries)
)
| 2,862 | 39.9 | 94 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/task_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from operator import attrgetter
from airflow import DAG
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.schemas.task_schema import TaskCollection, task_collection_schema, task_schema
from airflow.api_connexion.types import APIResponse
from airflow.exceptions import TaskNotFound
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
def get_task(*, dag_id: str, task_id: str) -> APIResponse:
"""Get simplified representation of a task."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found")
try:
task = dag.get_task(task_id=task_id)
except TaskNotFound:
raise NotFound("Task not found")
return task_schema.dump(task)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
def get_tasks(*, dag_id: str, order_by: str = "task_id") -> APIResponse:
"""Get tasks for DAG."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found")
tasks = dag.tasks
try:
tasks = sorted(tasks, key=attrgetter(order_by.lstrip("-")), reverse=(order_by[0:1] == "-"))
except AttributeError as err:
raise BadRequest(detail=str(err))
task_collection = TaskCollection(tasks=tasks, total_entries=len(tasks))
return task_collection_schema.dump(task_collection)
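A sketch of the ordering rule in get_tasks, using stand-in objects instead of real operators: a leading "-" in order_by means descending order on the named attribute.
from operator import attrgetter
from types import SimpleNamespace
tasks = [SimpleNamespace(task_id="extract"), SimpleNamespace(task_id="load")]
order_by = "-task_id"
ordered = sorted(tasks, key=attrgetter(order_by.lstrip("-")), reverse=(order_by[0:1] == "-"))
print([t.task_id for t in ordered])  # ['load', 'extract']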
| 2,591 | 36.565217 | 105 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/user_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from connexion import NoContent
from flask import request
from marshmallow import ValidationError
from sqlalchemy import asc, desc, func, select
from werkzeug.security import generate_password_hash
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown
from airflow.api_connexion.parameters import check_limit, format_parameters
from airflow.api_connexion.schemas.user_schema import (
UserCollection,
user_collection_item_schema,
user_collection_schema,
user_schema,
)
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.www.fab_security.sqla.models import Role, User
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])
def get_user(*, username: str) -> APIResponse:
"""Get a user."""
ab_security_manager = get_airflow_app().appbuilder.sm
user = ab_security_manager.find_user(username=username)
if not user:
raise NotFound(title="User not found", detail=f"The User with username `{username}` was not found")
return user_collection_item_schema.dump(user)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])
@format_parameters({"limit": check_limit})
def get_users(*, limit: int, order_by: str = "id", offset: str | None = None) -> APIResponse:
"""Get users."""
appbuilder = get_airflow_app().appbuilder
session = appbuilder.get_session
total_entries = session.execute(select(func.count(User.id))).scalar()
direction = desc if order_by.startswith("-") else asc
to_replace = {"user_id": "id"}
order_param = order_by.strip("-")
order_param = to_replace.get(order_param, order_param)
allowed_filter_attrs = [
"id",
"first_name",
"last_name",
"user_name",
"email",
"is_active",
"role",
]
if order_by not in allowed_filter_attrs:
raise BadRequest(
detail=f"Ordering with '{order_by}' is disallowed or "
f"the attribute does not exist on the model"
)
query = select(User).order_by(direction(getattr(User, order_param))).offset(offset).limit(limit)
users = session.scalars(query).all()
return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)])
def post_user() -> APIResponse:
"""Create a new user."""
try:
data = user_schema.load(request.json)
except ValidationError as e:
raise BadRequest(detail=str(e.messages))
security_manager = get_airflow_app().appbuilder.sm
username = data["username"]
email = data["email"]
if security_manager.find_user(username=username):
detail = f"Username `{username}` already exists. Use PATCH to update."
raise AlreadyExists(detail=detail)
if security_manager.find_user(email=email):
detail = f"The email `{email}` is already taken."
raise AlreadyExists(detail=detail)
roles_to_add = []
missing_role_names = []
for role_data in data.pop("roles", ()):
role_name = role_data["name"]
role = security_manager.find_role(role_name)
if role is None:
missing_role_names.append(role_name)
else:
roles_to_add.append(role)
if missing_role_names:
detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}"
raise BadRequest(detail=detail)
if not roles_to_add: # No roles provided, use the F.A.B's default registered user role.
roles_to_add.append(security_manager.find_role(security_manager.auth_user_registration_role))
user = security_manager.add_user(role=roles_to_add, **data)
if not user:
detail = f"Failed to add user `{username}`."
raise Unknown(detail=detail)
return user_schema.dump(user)
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)])
def patch_user(*, username: str, update_mask: UpdateMask = None) -> APIResponse:
"""Update a user."""
try:
data = user_schema.load(request.json)
except ValidationError as e:
raise BadRequest(detail=str(e.messages))
security_manager = get_airflow_app().appbuilder.sm
user = security_manager.find_user(username=username)
if user is None:
detail = f"The User with username `{username}` was not found"
raise NotFound(title="User not found", detail=detail)
# Check unique username
new_username = data.get("username")
if new_username and new_username != username:
if security_manager.find_user(username=new_username):
raise AlreadyExists(detail=f"The username `{new_username}` already exists")
# Check unique email
email = data.get("email")
if email and email != user.email:
if security_manager.find_user(email=email):
raise AlreadyExists(detail=f"The email `{email}` already exists")
# Get fields to update.
if update_mask is not None:
masked_data = {}
missing_mask_names = []
for field in update_mask:
field = field.strip()
try:
masked_data[field] = data[field]
except KeyError:
missing_mask_names.append(field)
if missing_mask_names:
detail = f"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}"
raise BadRequest(detail=detail)
data = masked_data
roles_to_update: list[Role] | None
if "roles" in data:
roles_to_update = []
missing_role_names = []
for role_data in data.pop("roles", ()):
role_name = role_data["name"]
role = security_manager.find_role(role_name)
if role is None:
missing_role_names.append(role_name)
else:
roles_to_update.append(role)
if missing_role_names:
detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}"
raise BadRequest(detail=detail)
else:
roles_to_update = None # Don't change existing value.
if "password" in data:
user.password = generate_password_hash(data.pop("password"))
if roles_to_update is not None:
user.roles = roles_to_update
for key, value in data.items():
setattr(user, key, value)
security_manager.update_user(user)
return user_schema.dump(user)
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)])
def delete_user(*, username: str) -> APIResponse:
"""Delete a user."""
security_manager = get_airflow_app().appbuilder.sm
user = security_manager.find_user(username=username)
if user is None:
detail = f"The User with username `{username}` was not found"
raise NotFound(title="User not found", detail=detail)
user.roles = [] # Clear foreign keys on this user first.
security_manager.get_session.delete(user)
security_manager.get_session.commit()
return NoContent, HTTPStatus.NO_CONTENT
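A brief sketch of the password handling used in patch_user; the password is a placeholder.
from werkzeug.security import check_password_hash, generate_password_hash
hashed = generate_password_hash("not-a-real-password")
print(check_password_hash(hashed, "not-a-real-password"))  # True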
| 8,100 | 37.393365 | 107 |
py
|
airflow
|
airflow-main/airflow/api_connexion/endpoints/dag_endpoint.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from typing import Collection
from connexion import NoContent
from flask import g, request
from marshmallow import ValidationError
from sqlalchemy import func, select, update
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import or_
from airflow import DAG
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.dag_schema import (
DAGCollection,
dag_detail_schema,
dag_schema,
dags_collection_schema,
)
from airflow.api_connexion.types import APIResponse, UpdateMask
from airflow.exceptions import AirflowException, DagNotFound
from airflow.models.dag import DagModel, DagTag
from airflow.security import permissions
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@provide_session
def get_dag(*, dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get basic information about a DAG."""
dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
if dag is None:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
return dag_schema.dump(dag)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
def get_dag_details(*, dag_id: str) -> APIResponse:
"""Get details of DAG."""
dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)
if not dag:
raise NotFound("DAG not found", detail=f"The DAG with dag_id: {dag_id} was not found")
return dag_detail_schema.dump(dag)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)])
@format_parameters({"limit": check_limit})
@provide_session
def get_dags(
*,
limit: int,
offset: int = 0,
tags: Collection[str] | None = None,
dag_id_pattern: str | None = None,
only_active: bool = True,
paused: bool | None = None,
order_by: str = "dag_id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all DAGs."""
allowed_attrs = ["dag_id"]
dags_query = select(DagModel).where(~DagModel.is_subdag)
if only_active:
dags_query = dags_query.where(DagModel.is_active)
if paused is not None:
if paused:
dags_query = dags_query.where(DagModel.is_paused)
else:
dags_query = dags_query.where(~DagModel.is_paused)
if dag_id_pattern:
dags_query = dags_query.where(DagModel.dag_id.ilike(f"%{dag_id_pattern}%"))
readable_dags = get_airflow_app().appbuilder.sm.get_accessible_dag_ids(g.user)
dags_query = dags_query.where(DagModel.dag_id.in_(readable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.where(or_(*cond))
total_entries = session.scalar(select(func.count()).select_from(dags_query))
dags_query = apply_sorting(dags_query, order_by, {}, allowed_attrs)
dags = session.scalars(dags_query.offset(offset).limit(limit)).all()
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)])
@provide_session
def patch_dag(*, dag_id: str, update_mask: UpdateMask = None, session: Session = NEW_SESSION) -> APIResponse:
"""Update the specific DAG."""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ["is_paused"]:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
patch_body_[update_mask[0]] = patch_body[update_mask[0]]
patch_body = patch_body_
dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
if not dag:
raise NotFound(f"Dag with id: '{dag_id}' not found")
dag.is_paused = patch_body["is_paused"]
session.flush()
return dag_schema.dump(dag)
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)])
@format_parameters({"limit": check_limit})
@provide_session
def patch_dags(limit, session, offset=0, only_active=True, tags=None, dag_id_pattern=None, update_mask=None):
"""Patch multiple DAGs."""
try:
patch_body = dag_schema.load(request.json, session=session)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
if update_mask:
patch_body_ = {}
if update_mask != ["is_paused"]:
raise BadRequest(detail="Only `is_paused` field can be updated through the REST API")
update_mask = update_mask[0]
patch_body_[update_mask] = patch_body[update_mask]
patch_body = patch_body_
if only_active:
dags_query = select(DagModel).where(~DagModel.is_subdag, DagModel.is_active)
else:
dags_query = select(DagModel).where(~DagModel.is_subdag)
if dag_id_pattern == "~":
dag_id_pattern = "%"
dags_query = dags_query.where(DagModel.dag_id.ilike(f"%{dag_id_pattern}%"))
editable_dags = get_airflow_app().appbuilder.sm.get_editable_dag_ids(g.user)
dags_query = dags_query.where(DagModel.dag_id.in_(editable_dags))
if tags:
cond = [DagModel.tags.any(DagTag.name == tag) for tag in tags]
dags_query = dags_query.where(or_(*cond))
total_entries = session.scalar(select(func.count()).select_from(dags_query))
dags = session.scalars(dags_query.order_by(DagModel.dag_id).offset(offset).limit(limit)).all()
dags_to_update = {dag.dag_id for dag in dags}
session.execute(
update(DagModel)
.where(DagModel.dag_id.in_(dags_to_update))
.values(is_paused=patch_body["is_paused"])
.execution_options(synchronize_session="fetch")
)
session.flush()
return dags_collection_schema.dump(DAGCollection(dags=dags, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG)])
@provide_session
def delete_dag(dag_id: str, session: Session = NEW_SESSION) -> APIResponse:
"""Delete the specific DAG."""
from airflow.api.common import delete_dag as delete_dag_module
try:
delete_dag_module.delete_dag(dag_id, session=session)
except DagNotFound:
raise NotFound(f"Dag with id: '{dag_id}' not found")
except AirflowException:
raise AlreadyExists(detail=f"Task instances of dag with id: '{dag_id}' are still running")
return NoContent, HTTPStatus.NO_CONTENT
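
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows how the endpoints above are typically reached through Airflow's stable REST API.
# The base URL, the basic-auth credentials, and the helper name `_example_list_dags`
# are assumptions made for this sketch only.
def _example_list_dags(dag_id_pattern: str = "example") -> dict:
    import requests  # assumed to be available in the calling environment

    response = requests.get(
        "http://localhost:8080/api/v1/dags",
        params={"limit": 25, "dag_id_pattern": dag_id_pattern},
        auth=("admin", "admin"),
    )
    response.raise_for_status()
    # The payload mirrors DAGCollection as serialized by dags_collection_schema above.
    return response.json()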
| 7,663 | 38.709845 | 109 | py | airflow | airflow-main/airflow/api_connexion/endpoints/update_mask.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Mapping, Sequence
from airflow.api_connexion.exceptions import BadRequest
def extract_update_mask_data(
update_mask: Sequence[str], non_update_fields: list[str], data: Mapping[str, Any]
) -> Mapping[str, Any]:
extracted_data = {}
for field in update_mask:
field = field.strip()
if field in data and field not in non_update_fields:
extracted_data[field] = data[field]
else:
raise BadRequest(detail=f"'{field}' is unknown or cannot be updated.")
return extracted_data
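
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Demonstrates the contract of extract_update_mask_data: fields named in the mask and
# not listed as non-updatable are copied through, anything else raises BadRequest.
# The sample payload and the helper name `_example_extract` are made up for this sketch.
def _example_extract() -> Mapping[str, Any]:
    data = {"is_paused": True, "dag_id": "example_dag"}
    # Returns {"is_paused": True}; a mask of ["dag_id"] would raise BadRequest instead,
    # because "dag_id" is declared non-updatable here.
    return extract_update_mask_data(["is_paused"], non_update_fields=["dag_id"], data=data)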
| 1,378 | 38.4 | 85 | py | airflow | airflow-main/airflow/api_connexion/endpoints/event_log_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.event_log_schema import (
EventLogCollection,
event_log_collection_schema,
event_log_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models import Log
from airflow.security import permissions
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG)])
@provide_session
def get_event_log(*, event_log_id: int, session: Session = NEW_SESSION) -> APIResponse:
"""Get a log entry."""
event_log = session.get(Log, event_log_id)
if event_log is None:
raise NotFound("Event Log not found")
return event_log_schema.dump(event_log)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG)])
@format_parameters({"limit": check_limit})
@provide_session
def get_event_logs(
*,
limit: int,
offset: int | None = None,
order_by: str = "event_log_id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get all log entries from event log."""
to_replace = {"event_log_id": "id", "when": "dttm"}
allowed_filter_attrs = [
"event_log_id",
"when",
"dag_id",
"task_id",
"event",
"execution_date",
"owner",
"extra",
]
total_entries = session.scalars(func.count(Log.id)).one()
query = select(Log)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
event_logs = session.scalars(query.offset(offset).limit(limit)).all()
return event_log_collection_schema.dump(
EventLogCollection(event_logs=event_logs, total_entries=total_entries)
)
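
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows a typical client call that ends up in get_event_logs above, via the stable REST
# API. The base URL, credentials, and the helper name `_example_list_event_logs` are
# assumptions for this sketch only.
def _example_list_event_logs() -> dict:
    import requests  # assumed to be available in the calling environment

    response = requests.get(
        "http://localhost:8080/api/v1/eventLogs",
        params={"limit": 10, "order_by": "-event_log_id"},
        auth=("admin", "admin"),
    )
    response.raise_for_status()
    # The payload mirrors EventLogCollection as serialized by event_log_collection_schema.
    return response.json()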
| 2,773 | 35.986667 | 90 | py | airflow | airflow-main/airflow/api_connexion/endpoints/dataset_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import func, select
from sqlalchemy.orm import Session, joinedload, subqueryload
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.dataset_schema import (
DatasetCollection,
DatasetEventCollection,
dataset_collection_schema,
dataset_event_collection_schema,
dataset_schema,
)
from airflow.api_connexion.types import APIResponse
from airflow.models.dataset import DatasetEvent, DatasetModel
from airflow.security import permissions
from airflow.utils.session import NEW_SESSION, provide_session
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET)])
@provide_session
def get_dataset(uri: str, session: Session = NEW_SESSION) -> APIResponse:
"""Get a Dataset."""
dataset = session.scalar(
select(DatasetModel)
.where(DatasetModel.uri == uri)
.options(joinedload(DatasetModel.consuming_dags), joinedload(DatasetModel.producing_tasks))
)
if not dataset:
raise NotFound(
"Dataset not found",
detail=f"The Dataset with uri: `{uri}` was not found",
)
return dataset_schema.dump(dataset)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET)])
@format_parameters({"limit": check_limit})
@provide_session
def get_datasets(
*,
limit: int,
offset: int = 0,
uri_pattern: str | None = None,
order_by: str = "id",
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get datasets."""
allowed_attrs = ["id", "uri", "created_at", "updated_at"]
total_entries = session.scalars(select(func.count(DatasetModel.id))).one()
query = select(DatasetModel)
if uri_pattern:
query = query.where(DatasetModel.uri.ilike(f"%{uri_pattern}%"))
query = apply_sorting(query, order_by, {}, allowed_attrs)
datasets = session.scalars(
query.options(subqueryload(DatasetModel.consuming_dags), subqueryload(DatasetModel.producing_tasks))
.offset(offset)
.limit(limit)
).all()
return dataset_collection_schema.dump(DatasetCollection(datasets=datasets, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET)])
@provide_session
@format_parameters({"limit": check_limit})
def get_dataset_events(
*,
limit: int,
offset: int = 0,
order_by: str = "timestamp",
dataset_id: int | None = None,
source_dag_id: str | None = None,
source_task_id: str | None = None,
source_run_id: str | None = None,
source_map_index: int | None = None,
session: Session = NEW_SESSION,
) -> APIResponse:
"""Get dataset events."""
allowed_attrs = ["source_dag_id", "source_task_id", "source_run_id", "source_map_index", "timestamp"]
query = select(DatasetEvent)
if dataset_id:
query = query.where(DatasetEvent.dataset_id == dataset_id)
if source_dag_id:
query = query.where(DatasetEvent.source_dag_id == source_dag_id)
if source_task_id:
query = query.where(DatasetEvent.source_task_id == source_task_id)
if source_run_id:
query = query.where(DatasetEvent.source_run_id == source_run_id)
if source_map_index:
query = query.where(DatasetEvent.source_map_index == source_map_index)
query = query.options(subqueryload(DatasetEvent.created_dagruns))
total_entries = session.scalar(select(func.count()).select_from(query))
query = apply_sorting(query, order_by, {}, allowed_attrs)
events = session.scalars(query.offset(offset).limit(limit)).all()
return dataset_event_collection_schema.dump(
DatasetEventCollection(dataset_events=events, total_entries=total_entries)
)
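
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows how get_datasets and get_dataset_events above are typically reached through the
# stable REST API. The base URL, credentials, the sample URI pattern and the helper name
# are assumptions for this sketch only.
def _example_list_datasets_and_events(uri_pattern: str = "s3://") -> tuple[dict, dict]:
    import requests  # assumed to be available in the calling environment

    auth = ("admin", "admin")
    datasets = requests.get(
        "http://localhost:8080/api/v1/datasets",
        params={"limit": 20, "uri_pattern": uri_pattern},
        auth=auth,
    )
    datasets.raise_for_status()
    events = requests.get(
        "http://localhost:8080/api/v1/datasets/events",
        params={"limit": 20, "order_by": "-timestamp"},
        auth=auth,
    )
    events.raise_for_status()
    return datasets.json(), events.json()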
| 4,685 | 37.727273 | 108 | py | airflow | airflow-main/airflow/api_connexion/endpoints/variable_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from http import HTTPStatus
from flask import Response
from marshmallow import ValidationError
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from airflow.api_connexion import security
from airflow.api_connexion.endpoints.request_dict import get_json_request_dict
from airflow.api_connexion.endpoints.update_mask import extract_update_mask_data
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.variable_schema import variable_collection_schema, variable_schema
from airflow.api_connexion.types import UpdateMask
from airflow.models import Variable
from airflow.security import permissions
from airflow.utils.log.action_logger import action_event_from_permission
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.decorators import action_logging
RESOURCE_EVENT_PREFIX = "variable"
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE)])
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_DELETE,
),
)
def delete_variable(*, variable_key: str) -> Response:
"""Delete variable."""
if Variable.delete(variable_key) == 0:
raise NotFound("Variable not found")
return Response(status=HTTPStatus.NO_CONTENT)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE)])
@provide_session
def get_variable(*, variable_key: str, session: Session = NEW_SESSION) -> Response:
"""Get a variable by key."""
var = session.scalar(select(Variable).where(Variable.key == variable_key).limit(1))
if not var:
raise NotFound("Variable not found")
return variable_schema.dump(var)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE)])
@format_parameters({"limit": check_limit})
@provide_session
def get_variables(
*,
limit: int | None,
order_by: str = "id",
offset: int | None = None,
session: Session = NEW_SESSION,
) -> Response:
"""Get all variable values."""
total_entries = session.execute(select(func.count(Variable.id))).scalar()
to_replace = {"value": "val"}
allowed_filter_attrs = ["value", "key", "id"]
query = select(Variable)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
variables = session.scalars(query.offset(offset).limit(limit)).all()
return variable_collection_schema.dump(
{
"variables": variables,
"total_entries": total_entries,
}
)
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE)])
@provide_session
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_EDIT,
),
)
def patch_variable(
*,
variable_key: str,
update_mask: UpdateMask = None,
session: Session = NEW_SESSION,
) -> Response:
"""Update a variable by key."""
try:
data = variable_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest("Invalid Variable schema", detail=str(err.messages))
if data["key"] != variable_key:
raise BadRequest("Invalid post body", detail="key from request body doesn't match uri parameter")
non_update_fields = ["key"]
    variable = session.scalar(select(Variable).filter_by(key=variable_key).limit(1))
    if not variable:
        # Guard against a missing key; without this, setattr on None below raises a 500.
        raise NotFound("Variable not found", detail="Variable does not exist")
if update_mask:
data = extract_update_mask_data(update_mask, non_update_fields, data)
for key, val in data.items():
setattr(variable, key, val)
session.add(variable)
return variable_schema.dump(variable)
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE)])
@action_logging(
event=action_event_from_permission(
prefix=RESOURCE_EVENT_PREFIX,
permission=permissions.ACTION_CAN_CREATE,
),
)
def post_variables() -> Response:
"""Create a variable."""
try:
data = variable_schema.load(get_json_request_dict())
except ValidationError as err:
raise BadRequest("Invalid Variable schema", detail=str(err.messages))
Variable.set(data["key"], data["val"])
return variable_schema.dump(data)
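
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows the REST round trip that exercises post_variables, patch_variable and
# get_variable above. The base URL, credentials, the variable key "example_var", and
# the helper name are assumptions for this sketch only.
def _example_variable_round_trip() -> dict:
    import requests  # assumed to be available in the calling environment

    base, auth = "http://localhost:8080/api/v1/variables", ("admin", "admin")
    requests.post(base, json={"key": "example_var", "value": "1"}, auth=auth).raise_for_status()
    requests.patch(
        f"{base}/example_var", json={"key": "example_var", "value": "2"}, auth=auth
    ).raise_for_status()
    response = requests.get(f"{base}/example_var", auth=auth)
    response.raise_for_status()
    return response.json()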
| 5,205 | 36.453237 | 105 | py | airflow | airflow-main/airflow/api_connexion/endpoints/plugin_endpoint.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.api_connexion import security
from airflow.api_connexion.parameters import check_limit, format_parameters
from airflow.api_connexion.schemas.plugin_schema import PluginCollection, plugin_collection_schema
from airflow.api_connexion.types import APIResponse
from airflow.plugins_manager import get_plugin_info
from airflow.security import permissions
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN)])
@format_parameters({"limit": check_limit})
def get_plugins(*, limit: int, offset: int = 0) -> APIResponse:
"""Get plugins endpoint."""
plugins_info = get_plugin_info()
collection = PluginCollection(plugins=plugins_info[offset:][:limit], total_entries=len(plugins_info))
return plugin_collection_schema.dump(collection)
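
# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Shows how get_plugins above is reached through the stable REST API; pagination is
# applied in memory by slicing the plugin info list. The base URL, credentials and the
# helper name are assumptions for this sketch only.
def _example_list_plugins(limit: int = 10, offset: int = 0) -> dict:
    import requests  # assumed to be available in the calling environment

    response = requests.get(
        "http://localhost:8080/api/v1/plugins",
        params={"limit": limit, "offset": offset},
        auth=("admin", "admin"),
    )
    response.raise_for_status()
    return response.json()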
| 1,609 | 46.352941 | 105 | py |