repo (stringlengths 2–99) | file (stringlengths 13–225) | code (stringlengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
airflow
|
airflow-main/airflow/migrations/versions/0011_1_6_2_dagrun_start_end.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``start_date`` and ``end_date`` in ``dag_run`` table
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4446e08588"
down_revision = "561833c1c74b"
branch_labels = None
depends_on = None
airflow_version = "1.6.2"
def upgrade():
op.add_column("dag_run", sa.Column("end_date", sa.DateTime(), nullable=True))
op.add_column("dag_run", sa.Column("start_date", sa.DateTime(), nullable=True))
def downgrade():
op.drop_column("dag_run", "start_date")
op.drop_column("dag_run", "end_date")
| 1,472 | 31.021739 | 83 |
py
|
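The `revision`/`down_revision` pair in the migration above (`4446e08588` → `561833c1c74b`) is what Alembic uses to order files like these into a single chain. As a hedged illustration (not part of the dataset row above), the sketch below walks that chain with Alembic's public API; the `alembic.ini` path is an assumed placeholder.

```python
# Sketch only: walk the revision chain defined by revision/down_revision pairs.
# The "alembic.ini" path is a placeholder assumption, not taken from the row above.
from alembic.config import Config
from alembic.script import ScriptDirectory

config = Config("alembic.ini")  # hypothetical config pointing at the migrations folder
scripts = ScriptDirectory.from_config(config)

# walk_revisions() yields Script objects from the head down to the base,
# following each down_revision link.
for script in scripts.walk_revisions():
    print(script.revision, "->", script.down_revision)
```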
airflow
|
airflow-main/airflow/migrations/versions/0039_1_10_2_add_superuser_field.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add superuser field
Revision ID: 41f5f12752f8
Revises: 03bc53e68815
Create Date: 2018-12-04 15:50:04.456875
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "41f5f12752f8"
down_revision = "03bc53e68815"
branch_labels = None
depends_on = None
airflow_version = "1.10.2"
def upgrade():
op.add_column("users", sa.Column("superuser", sa.Boolean(), default=False))
def downgrade():
op.drop_column("users", "superuser")
| 1,307 | 29.418605 | 79 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0114_2_4_0_add_dataset_model.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add Dataset model
Revision ID: 0038cd0c28b4
Revises: 44b7034f6bdc
Create Date: 2022-06-22 14:37:20.880672
"""
from __future__ import annotations
import sqlalchemy as sa
import sqlalchemy_jsonfield
from alembic import op
from sqlalchemy import Integer, String, func
from airflow.migrations.db_types import TIMESTAMP, StringID
from airflow.settings import json
revision = "0038cd0c28b4"
down_revision = "44b7034f6bdc"
branch_labels = None
depends_on = None
airflow_version = "2.4.0"
def _create_dataset_table():
op.create_table(
"dataset",
sa.Column("id", Integer, primary_key=True, autoincrement=True),
sa.Column(
"uri",
String(length=3000).with_variant(
String(
length=3000,
# latin1 allows for more indexed length in mysql
# and this field should only be ascii chars
collation="latin1_general_cs",
),
"mysql",
),
nullable=False,
),
sa.Column("extra", sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={}),
sa.Column("created_at", TIMESTAMP, nullable=False),
sa.Column("updated_at", TIMESTAMP, nullable=False),
sqlite_autoincrement=True, # ensures PK values not reused
)
op.create_index("idx_uri_unique", "dataset", ["uri"], unique=True)
def _create_dag_schedule_dataset_reference_table():
op.create_table(
"dag_schedule_dataset_reference",
sa.Column("dataset_id", Integer, primary_key=True, nullable=False),
sa.Column("dag_id", StringID(), primary_key=True, nullable=False),
sa.Column("created_at", TIMESTAMP, default=func.now, nullable=False),
sa.Column("updated_at", TIMESTAMP, default=func.now, nullable=False),
sa.ForeignKeyConstraint(
("dataset_id",),
["dataset.id"],
name="dsdr_dataset_fkey",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
columns=("dag_id",),
refcolumns=["dag.dag_id"],
name="dsdr_dag_id_fkey",
ondelete="CASCADE",
),
)
def _create_task_outlet_dataset_reference_table():
op.create_table(
"task_outlet_dataset_reference",
sa.Column("dataset_id", Integer, primary_key=True, nullable=False),
sa.Column("dag_id", StringID(), primary_key=True, nullable=False),
sa.Column("task_id", StringID(), primary_key=True, nullable=False),
sa.Column("created_at", TIMESTAMP, default=func.now, nullable=False),
sa.Column("updated_at", TIMESTAMP, default=func.now, nullable=False),
sa.ForeignKeyConstraint(
("dataset_id",),
["dataset.id"],
name="todr_dataset_fkey",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
columns=("dag_id",),
refcolumns=["dag.dag_id"],
name="todr_dag_id_fkey",
ondelete="CASCADE",
),
)
def _create_dataset_dag_run_queue_table():
op.create_table(
"dataset_dag_run_queue",
sa.Column("dataset_id", Integer, primary_key=True, nullable=False),
sa.Column("target_dag_id", StringID(), primary_key=True, nullable=False),
sa.Column("created_at", TIMESTAMP, default=func.now, nullable=False),
sa.ForeignKeyConstraint(
("dataset_id",),
["dataset.id"],
name="ddrq_dataset_fkey",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
("target_dag_id",),
["dag.dag_id"],
name="ddrq_dag_fkey",
ondelete="CASCADE",
),
)
def _create_dataset_event_table():
op.create_table(
"dataset_event",
sa.Column("id", Integer, primary_key=True, autoincrement=True),
sa.Column("dataset_id", Integer, nullable=False),
sa.Column("extra", sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={}),
sa.Column("source_task_id", String(250), nullable=True),
sa.Column("source_dag_id", String(250), nullable=True),
sa.Column("source_run_id", String(250), nullable=True),
sa.Column("source_map_index", sa.Integer(), nullable=True, server_default="-1"),
sa.Column("timestamp", TIMESTAMP, nullable=False),
sqlite_autoincrement=True, # ensures PK values not reused
)
op.create_index("idx_dataset_id_timestamp", "dataset_event", ["dataset_id", "timestamp"])
def _create_dataset_event_dag_run_table():
op.create_table(
"dagrun_dataset_event",
sa.Column("dag_run_id", sa.Integer(), nullable=False),
sa.Column("event_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["dag_run_id"],
["dag_run.id"],
name=op.f("dagrun_dataset_events_dag_run_id_fkey"),
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["event_id"],
["dataset_event.id"],
name=op.f("dagrun_dataset_events_event_id_fkey"),
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("dag_run_id", "event_id", name=op.f("dagrun_dataset_events_pkey")),
)
with op.batch_alter_table("dagrun_dataset_event") as batch_op:
batch_op.create_index("idx_dagrun_dataset_events_dag_run_id", ["dag_run_id"], unique=False)
batch_op.create_index("idx_dagrun_dataset_events_event_id", ["event_id"], unique=False)
def upgrade():
"""Apply Add Dataset model"""
_create_dataset_table()
_create_dag_schedule_dataset_reference_table()
_create_task_outlet_dataset_reference_table()
_create_dataset_dag_run_queue_table()
_create_dataset_event_table()
_create_dataset_event_dag_run_table()
def downgrade():
"""Unapply Add Dataset model"""
op.drop_table("dag_schedule_dataset_reference")
op.drop_table("task_outlet_dataset_reference")
op.drop_table("dataset_dag_run_queue")
op.drop_table("dagrun_dataset_event")
op.drop_table("dataset_event")
op.drop_table("dataset")
| 6,943 | 35.356021 | 99 |
py
|
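The `uri` column in the migration above uses `String().with_variant()` so that MySQL gets a latin1-collated type while every other backend keeps a plain `VARCHAR`. A minimal, self-contained sketch of that mechanism follows; the table and column names are illustrative, not taken from the migration.

```python
# Sketch of String().with_variant(): the MySQL-specific collation is applied only
# when the DDL is compiled for the "mysql" dialect. Table/column names are hypothetical.
import sqlalchemy as sa
from sqlalchemy.dialects import mysql, postgresql
from sqlalchemy.schema import CreateTable

uri_type = sa.String(length=3000).with_variant(
    sa.String(length=3000, collation="latin1_general_cs"),  # MySQL-only variant
    "mysql",
)

example = sa.Table("example", sa.MetaData(), sa.Column("uri", uri_type, nullable=False))

# The MySQL DDL includes the collated VARCHAR; the PostgreSQL DDL stays plain.
print(CreateTable(example).compile(dialect=mysql.dialect()))
print(CreateTable(example).compile(dialect=postgresql.dialect()))
```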
airflow
|
airflow-main/airflow/migrations/versions/0079_2_0_2_increase_size_of_connection_extra_field_.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Increase size of ``connection.extra`` field to handle multiple RSA keys
Revision ID: 449b4072c2da
Revises: 82b7c48c147f
Create Date: 2020-03-16 19:02:55.337710
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "449b4072c2da"
down_revision = "82b7c48c147f"
branch_labels = None
depends_on = None
airflow_version = "2.0.2"
def upgrade():
"""Apply increase_length_for_connection_password"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"extra",
existing_type=sa.VARCHAR(length=5000),
type_=sa.TEXT(),
existing_nullable=True,
)
def downgrade():
"""Unapply increase_length_for_connection_password"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"extra",
existing_type=sa.TEXT(),
type_=sa.VARCHAR(length=5000),
existing_nullable=True,
)
| 1,849 | 30.896552 | 74 |
py
|
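The migration above wraps the type change in `op.batch_alter_table` so it also works on SQLite, which cannot alter a column in place and instead has the table rebuilt by Alembic's batch mode. For comparison, here is a hedged sketch of the same widening expressed as a plain `op.alter_column` call, which suffices on backends with native `ALTER COLUMN` support; the helper name is illustrative.

```python
# Sketch only: the same VARCHAR(5000) -> TEXT widening without batch mode.
# widen_connection_extra is a hypothetical helper, not part of the migration above.
import sqlalchemy as sa
from alembic import op


def widen_connection_extra() -> None:
    op.alter_column(
        "connection",
        "extra",
        existing_type=sa.VARCHAR(length=5000),
        type_=sa.TEXT(),
        existing_nullable=True,
    )
```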
airflow
|
airflow-main/airflow/migrations/versions/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0100_2_3_0_add_taskmap_and_map_id_on_taskinstance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``map_index`` column to TaskInstance to identify task-mapping,
and a ``task_map`` table to track mapping values from XCom.
Revision ID: e655c0453f75
Revises: f9da662e7089
Create Date: 2021-12-13 22:59:41.052584
"""
from __future__ import annotations
from alembic import op
from sqlalchemy import CheckConstraint, Column, ForeignKeyConstraint, Integer, text
from airflow.models.base import StringID
from airflow.utils.sqlalchemy import ExtendedJSON
# Revision identifiers, used by Alembic.
revision = "e655c0453f75"
down_revision = "f9da662e7089"
branch_labels = None
depends_on = None
airflow_version = "2.3.0"
def upgrade():
"""
Add ``map_index`` column to TaskInstance to identify task-mapping,
and a ``task_map`` table to track mapping values from XCom.
"""
# We need to first remove constraints on task_reschedule since they depend on task_instance.
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey")
batch_op.drop_index("idx_task_reschedule_dag_task_run")
# Change task_instance's primary key.
with op.batch_alter_table("task_instance") as batch_op:
# I think we always use this name for TaskInstance after 7b2661a43ba3?
batch_op.drop_constraint("task_instance_pkey", type_="primary")
batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id", "map_index"])
# Re-create task_reschedule's constraints.
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_foreign_key(
"task_reschedule_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id", "map_index"],
["dag_id", "task_id", "run_id", "map_index"],
ondelete="CASCADE",
)
batch_op.create_index(
"idx_task_reschedule_dag_task_run",
["dag_id", "task_id", "run_id", "map_index"],
unique=False,
)
# Create task_map.
op.create_table(
"task_map",
Column("dag_id", StringID(), primary_key=True),
Column("task_id", StringID(), primary_key=True),
Column("run_id", StringID(), primary_key=True),
Column("map_index", Integer, primary_key=True),
Column("length", Integer, nullable=False),
Column("keys", ExtendedJSON, nullable=True),
CheckConstraint("length >= 0", name="task_map_length_not_negative"),
ForeignKeyConstraint(
["dag_id", "task_id", "run_id", "map_index"],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_map_task_instance_fkey",
ondelete="CASCADE",
),
)
def downgrade():
"""Remove TaskMap and map_index on TaskInstance."""
op.drop_table("task_map")
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey")
batch_op.drop_index("idx_task_reschedule_dag_task_run")
batch_op.drop_column("map_index", mssql_drop_default=True)
op.execute("DELETE FROM task_instance WHERE map_index != -1")
with op.batch_alter_table("task_instance") as batch_op:
batch_op.drop_constraint("task_instance_pkey", type_="primary")
batch_op.drop_column("map_index", mssql_drop_default=True)
batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id"])
with op.batch_alter_table("task_reschedule") as batch_op:
batch_op.create_foreign_key(
"task_reschedule_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id"],
["dag_id", "task_id", "run_id"],
ondelete="CASCADE",
)
batch_op.create_index(
"idx_task_reschedule_dag_task_run",
["dag_id", "task_id", "run_id"],
unique=False,
)
| 5,032 | 38.629921 | 103 |
py
|
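The `map_index` column above is added with `server_default=text("-1")` rather than a Python-side `default=`, so the default lives in the database schema itself and existing rows are populated when the column is created. A small stand-alone sketch of what a server default renders to; the `demo` table is hypothetical.

```python
# Sketch: a server_default is rendered into the DDL, so it applies to rows inserted
# outside the ORM and, on most backends, backfills existing rows when the column is
# added. The "demo" table is hypothetical.
import sqlalchemy as sa
from sqlalchemy import text
from sqlalchemy.schema import CreateTable

demo = sa.Table(
    "demo",
    sa.MetaData(),
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("map_index", sa.Integer, nullable=False, server_default=text("-1")),
)

# Renders roughly: map_index INTEGER DEFAULT -1 NOT NULL
print(CreateTable(demo).compile())
```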
airflow
|
airflow-main/airflow/migrations/versions/0053_1_10_10_add_rendered_task_instance_fields_table.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``RenderedTaskInstanceFields`` table
Revision ID: 852ae6c715af
Revises: a4c2fd67d16b
Create Date: 2020-03-10 22:19:18.034961
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import text
from airflow.migrations.db_types import StringID
# revision identifiers, used by Alembic.
revision = "852ae6c715af"
down_revision = "a4c2fd67d16b"
branch_labels = None
depends_on = None
airflow_version = "1.10.10"
TABLE_NAME = "rendered_task_instance_fields"
def upgrade():
"""Apply Add ``RenderedTaskInstanceFields`` table"""
json_type = sa.JSON
conn = op.get_bind()
if conn.dialect.name != "postgresql":
# Mysql 5.7+/MariaDB 10.2.3 has JSON support. Rather than checking for
# versions, check for the function existing.
try:
conn.execute(text("SELECT JSON_VALID(1)")).fetchone()
except (sa.exc.OperationalError, sa.exc.ProgrammingError):
json_type = sa.Text
op.create_table(
TABLE_NAME,
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("task_id", StringID(), nullable=False),
sa.Column("execution_date", sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column("rendered_fields", json_type(), nullable=False),
sa.PrimaryKeyConstraint("dag_id", "task_id", "execution_date"),
)
def downgrade():
"""Drop RenderedTaskInstanceFields table"""
op.drop_table(TABLE_NAME)
| 2,255 | 31.695652 | 81 |
py
|
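The `JSON_VALID` probe above is a feature-detection pattern: instead of parsing MySQL/MariaDB version strings, the migration simply tries the function and falls back to `TEXT` if the server rejects it. Below is a hedged refactoring of that probe into a small helper; the helper name is an assumption, not Airflow API.

```python
# Sketch only: the probe-then-fallback pattern from the migration above,
# extracted into a helper. pick_json_type is a hypothetical name.
import sqlalchemy as sa
from sqlalchemy import text


def pick_json_type(conn):
    """Return sa.JSON if the server understands JSON_VALID, else sa.Text."""
    if conn.dialect.name == "postgresql":
        return sa.JSON
    try:
        conn.execute(text("SELECT JSON_VALID(1)")).fetchone()
    except (sa.exc.OperationalError, sa.exc.ProgrammingError):
        return sa.Text
    return sa.JSON
```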
airflow
|
airflow-main/airflow/migrations/versions/0020_1_7_1_xcom_dag_task_indices.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add indices on ``xcom`` table
Revision ID: 8504051e801b
Revises: 4addfa1236f1
Create Date: 2016-11-29 08:13:03.253312
"""
from __future__ import annotations
from alembic import op
# revision identifiers, used by Alembic.
revision = "8504051e801b"
down_revision = "4addfa1236f1"
branch_labels = None
depends_on = None
airflow_version = "1.7.1.3"
def upgrade():
"""Create Index."""
op.create_index("idx_xcom_dag_task_date", "xcom", ["dag_id", "task_id", "execution_date"], unique=False)
def downgrade():
"""Drop Index."""
op.drop_index("idx_xcom_dag_task_date", table_name="xcom")
| 1,393 | 29.977778 | 108 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0089_2_2_0_make_xcom_pkey_columns_non_nullable.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Make XCom primary key columns non-nullable
Revision ID: e9304a3141f0
Revises: 83f031fd9f1c
Create Date: 2021-04-06 13:22:02.197726
"""
from __future__ import annotations
from alembic import op
from airflow.migrations.db_types import TIMESTAMP, StringID
# revision identifiers, used by Alembic.
revision = "e9304a3141f0"
down_revision = "83f031fd9f1c"
branch_labels = None
depends_on = None
airflow_version = "2.2.0"
def upgrade():
"""Apply Make XCom primary key columns non-nullable"""
conn = op.get_bind()
with op.batch_alter_table("xcom") as bop:
bop.alter_column("key", type_=StringID(length=512), nullable=False)
bop.alter_column("execution_date", type_=TIMESTAMP, nullable=False)
if conn.dialect.name == "mssql":
bop.create_primary_key(
constraint_name="pk_xcom", columns=["dag_id", "task_id", "key", "execution_date"]
)
def downgrade():
"""Unapply Make XCom primary key columns non-nullable"""
conn = op.get_bind()
with op.batch_alter_table("xcom") as bop:
# regardless of what the model defined, the `key` and `execution_date`
# columns were always non-nullable for mysql, sqlite and postgres, so leave them alone
if conn.dialect.name == "mssql":
bop.drop_constraint("pk_xcom", type_="primary")
# execution_date and key wasn't nullable in the other databases
bop.alter_column("key", type_=StringID(length=512), nullable=True)
bop.alter_column("execution_date", type_=TIMESTAMP, nullable=True)
| 2,360 | 36.47619 | 97 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0023_1_8_2_add_max_tries_column_to_task_instance.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``max_tries`` column to ``task_instance``
Revision ID: cc1e65623dc7
Revises: 127d2bf2dfa7
Create Date: 2017-06-19 16:53:12.851141
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.orm import declarative_base
from airflow import settings
from airflow.models import DagBag
# revision identifiers, used by Alembic.
revision = "cc1e65623dc7"
down_revision = "127d2bf2dfa7"
branch_labels = None
depends_on = None
airflow_version = "1.8.2"
Base = declarative_base()
BATCH_SIZE = 5000
class TaskInstance(Base): # type: ignore
"""Task Instance class."""
__tablename__ = "task_instance"
task_id = Column(String(), primary_key=True)
dag_id = Column(String(), primary_key=True)
execution_date = Column(sa.DateTime, primary_key=True)
max_tries = Column(Integer)
try_number = Column(Integer, default=0)
def upgrade():
op.add_column("task_instance", sa.Column("max_tries", sa.Integer, server_default="-1"))
# Check if table task_instance exist before data migration. This check is
# needed for database that does not create table until migration finishes.
# Checking task_instance table exists prevent the error of querying
# non-existing task_instance table.
connection = op.get_bind()
inspector = inspect(connection)
tables = inspector.get_table_names()
if "task_instance" in tables:
# Get current session
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
if not bool(session.query(TaskInstance).first()):
return
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries == -1)
# Separate db query in batch to prevent loading entire table
# into memory and cause out of memory error.
while query.scalar():
tis = session.query(TaskInstance).filter(TaskInstance.max_tries == -1).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
# task_instance table might not have the up-to-date
# information, i.e dag or task might be modified or
# deleted in dagbag but is reflected in task instance
# table. In this case we do not retry the task that can't
# be parsed.
ti.max_tries = ti.try_number
else:
task = dag.get_task(ti.task_id)
if task.retries:
ti.max_tries = task.retries
else:
ti.max_tries = ti.try_number
session.merge(ti)
session.commit()
# Commit the current session.
session.commit()
def downgrade():
engine = settings.engine
connection = op.get_bind()
if engine.dialect.has_table(connection, "task_instance"):
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries != -1)
while query.scalar():
tis = session.query(TaskInstance).filter(TaskInstance.max_tries != -1).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
ti.try_number = 0
else:
task = dag.get_task(ti.task_id)
# max_tries - try_number is number of times a task instance
# left to retry by itself. So the current try_number should be
# max number of self retry (task.retries) minus number of
# times left for task instance to try the task.
ti.try_number = max(0, task.retries - (ti.max_tries - ti.try_number))
ti.max_tries = -1
session.merge(ti)
session.commit()
session.commit()
op.drop_column("task_instance", "max_tries")
| 5,083 | 39.031496 | 106 |
py
|
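The data-migration half of the file above binds an ORM session to the connection Alembic already holds (`sessionmaker(bind=op.get_bind())`), so schema changes and row updates share one transaction, and it walks the table in batches rather than loading it whole. A simplified, hedged sketch of that binding follows; it replaces the per-task DagBag lookup with a plain SQL update and uses a hypothetical helper name.

```python
# Sketch only: bind an ORM session to Alembic's connection for a data step.
# The UPDATE below is a simplified stand-in for the per-task logic above.
import sqlalchemy as sa
from alembic import op
from sqlalchemy.orm import Session


def backfill_max_tries() -> None:  # hypothetical helper name
    connection = op.get_bind()        # connection Alembic is already using
    session = Session(bind=connection)  # ORM session on the same connection
    try:
        remaining = session.execute(
            sa.text("SELECT COUNT(*) FROM task_instance WHERE max_tries = -1")
        ).scalar()
        if remaining:
            session.execute(
                sa.text("UPDATE task_instance SET max_tries = try_number WHERE max_tries = -1")
            )
        session.commit()
    finally:
        session.close()
```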
airflow
|
airflow-main/airflow/migrations/versions/0072_2_0_0_add_k8s_yaml_to_rendered_templates.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add-k8s-yaml-to-rendered-templates
Revision ID: 45ba3f1493b9
Revises: 364159666cbd
Create Date: 2020-10-23 23:01:52.471442
"""
from __future__ import annotations
import sqlalchemy_jsonfield
from alembic import op
from sqlalchemy import Column
from airflow.settings import json
# revision identifiers, used by Alembic.
revision = "45ba3f1493b9"
down_revision = "364159666cbd"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
__tablename__ = "rendered_task_instance_fields"
k8s_pod_yaml = Column("k8s_pod_yaml", sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
def upgrade():
"""Apply add-k8s-yaml-to-rendered-templates"""
with op.batch_alter_table(__tablename__, schema=None) as batch_op:
batch_op.add_column(k8s_pod_yaml)
def downgrade():
"""Unapply add-k8s-yaml-to-rendered-templates"""
with op.batch_alter_table(__tablename__, schema=None) as batch_op:
batch_op.drop_column("k8s_pod_yaml")
| 1,749 | 31.407407 | 95 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0059_2_0_0_drop_user_and_chart.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Drop ``user`` and ``chart`` table
Revision ID: cf5dc11e79ad
Revises: 03afc6b6f902
Create Date: 2019-01-24 15:30:35.834740
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import inspect, text
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "cf5dc11e79ad"
down_revision = "03afc6b6f902"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
    # We previously had a KnownEvent table, but we deleted the table without
    # a down migration to remove it (so we didn't delete anyone's data if they
    # were happening to use the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
for fkey in inspector.get_foreign_keys(table_name="known_event", referred_table="users"):
if fkey["name"]:
with op.batch_alter_table(table_name="known_event") as bop:
bop.drop_constraint(fkey["name"], type_="foreignkey")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
def downgrade():
conn = op.get_bind()
op.create_table(
"users",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("username", sa.String(length=250), nullable=True),
sa.Column("email", sa.String(length=500), nullable=True),
sa.Column("password", sa.String(255)),
sa.Column("superuser", sa.Boolean(), default=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
)
op.create_table(
"chart",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("label", sa.String(length=200), nullable=True),
sa.Column("conn_id", sa.String(length=250), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("chart_type", sa.String(length=100), nullable=True),
sa.Column("sql_layout", sa.String(length=50), nullable=True),
sa.Column("sql", sa.Text(), nullable=True),
sa.Column("y_log_scale", sa.Boolean(), nullable=True),
sa.Column("show_datatable", sa.Boolean(), nullable=True),
sa.Column("show_sql", sa.Boolean(), nullable=True),
sa.Column("height", sa.Integer(), nullable=True),
sa.Column("default_params", sa.String(length=5000), nullable=True),
sa.Column("x_is_date", sa.Boolean(), nullable=True),
sa.Column("iteration_no", sa.Integer(), nullable=True),
sa.Column("last_modified", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
),
sa.PrimaryKeyConstraint("id"),
)
if conn.dialect.name == "mysql":
conn.execute(text("SET time_zone = '+00:00'"))
op.alter_column(table_name="chart", column_name="last_modified", type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ("sqlite", "mssql"):
return
if conn.dialect.name == "postgresql":
conn.execute(text("set timezone=UTC"))
op.alter_column(table_name="chart", column_name="last_modified", type_=sa.TIMESTAMP(timezone=True))
| 4,163 | 35.526316 | 107 |
py
|
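The upgrade above reflects the live database with `sqlalchemy.inspect` before touching anything, so it is a no-op when the legacy `users`/`chart`/`known_event` tables never existed. Here is a self-contained sketch of that reflect-then-decide pattern against an in-memory SQLite database; the toy schema is illustrative only.

```python
# Sketch only: inspect the database before dropping, as the upgrade above does.
# The toy tables exist only in this in-memory example.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE users (id INTEGER PRIMARY KEY)"))
    conn.execute(
        sa.text(
            "CREATE TABLE known_event ("
            "id INTEGER PRIMARY KEY, user_id INTEGER REFERENCES users(id))"
        )
    )
    inspector = sa.inspect(conn)
    tables = inspector.get_table_names()
    if "known_event" in tables:
        for fkey in inspector.get_foreign_keys("known_event"):
            if fkey["referred_table"] == "users":
                print("foreign key onto users:", fkey["constrained_columns"])
```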
airflow
|
airflow-main/airflow/migrations/versions/0012_1_7_0_add_notification_sent_column_to_sla_miss.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``notification_sent`` column to ``sla_miss`` table
Revision ID: bbc73705a13e
Revises: 4446e08588
Create Date: 2016-01-14 18:05:54.871682
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "bbc73705a13e"
down_revision = "4446e08588"
branch_labels = None
depends_on = None
airflow_version = "1.7.0"
def upgrade():
op.add_column("sla_miss", sa.Column("notification_sent", sa.Boolean, default=False))
def downgrade():
op.drop_column("sla_miss", "notification_sent")
| 1,359 | 29.909091 | 88 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0010_1_6_2_add_password_column_to_user.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``password`` column to ``user`` table
Revision ID: 561833c1c74b
Revises: 40e67319e3a9
Create Date: 2015-11-30 06:51:25.872557
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "561833c1c74b"
down_revision = "40e67319e3a9"
branch_labels = None
depends_on = None
airflow_version = "1.6.2"
def upgrade():
op.add_column("user", sa.Column("password", sa.String(255)))
def downgrade():
op.drop_column("user", "password")
| 1,313 | 28.863636 | 64 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0022_1_7_1_add_dag_id_state_index_on_dag_run_table.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``dag_id``/``state`` index on ``dag_run`` table
Revision ID: 127d2bf2dfa7
Revises: 5e7d17757c7a
Create Date: 2017-01-25 11:43:51.635667
"""
from __future__ import annotations
from alembic import op
# revision identifiers, used by Alembic.
revision = "127d2bf2dfa7"
down_revision = "5e7d17757c7a"
branch_labels = None
depends_on = None
airflow_version = "1.7.1.3"
def upgrade():
op.create_index("dag_id_state", "dag_run", ["dag_id", "state"], unique=False)
def downgrade():
op.drop_index("dag_id_state", table_name="dag_run")
| 1,335 | 30.069767 | 81 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0083_2_1_0_add_description_field_to_variable.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add description field to ``Variable`` model
Revision ID: e165e7455d70
Revises: 90d1635d7b86
Create Date: 2021-04-11 22:28:02.107290
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "e165e7455d70"
down_revision = "90d1635d7b86"
branch_labels = None
depends_on = None
airflow_version = "2.1.0"
def upgrade():
"""Apply Add description field to ``Variable`` model"""
with op.batch_alter_table("variable", schema=None) as batch_op:
batch_op.add_column(sa.Column("description", sa.Text(), nullable=True))
def downgrade():
"""Unapply Add description field to ``Variable`` model"""
with op.batch_alter_table("variable", schema=None) as batch_op:
batch_op.drop_column("description")
| 1,593 | 32.208333 | 79 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0052_1_10_10_add_pool_slots_field_to_task_instance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``pool_slots`` field to ``task_instance``
Revision ID: a4c2fd67d16b
Revises: 7939bcff74ba
Create Date: 2020-01-14 03:35:01.161519
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "a4c2fd67d16b"
down_revision = "7939bcff74ba"
branch_labels = None
depends_on = None
airflow_version = "1.10.10"
def upgrade():
op.add_column("task_instance", sa.Column("pool_slots", sa.Integer, default=1))
def downgrade():
op.drop_column("task_instance", "pool_slots")
| 1,348 | 29.659091 | 82 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0021_1_7_1_add_pid_field_to_taskinstance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``pid`` field to ``TaskInstance``
Revision ID: 5e7d17757c7a
Revises: 8504051e801b
Create Date: 2016-12-07 15:51:37.119478
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "5e7d17757c7a"
down_revision = "8504051e801b"
branch_labels = None
depends_on = None
airflow_version = "1.7.1.3"
def upgrade():
"""Add pid column to task_instance table."""
op.add_column("task_instance", sa.Column("pid", sa.Integer))
def downgrade():
"""Drop pid column from task_instance table."""
op.drop_column("task_instance", "pid")
| 1,416 | 29.804348 | 64 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0035_1_10_2_add_idx_log_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add index on ``log`` table
Revision ID: dd25f486b8ea
Revises: 9635ae0956e7
Create Date: 2018-08-07 06:41:41.028249
"""
from __future__ import annotations
from alembic import op
# revision identifiers, used by Alembic.
revision = "dd25f486b8ea"
down_revision = "9635ae0956e7"
branch_labels = None
depends_on = None
airflow_version = "1.10.2"
def upgrade():
op.create_index("idx_log_dag", "log", ["dag_id"], unique=False)
def downgrade():
op.drop_index("idx_log_dag", table_name="log")
| 1,290 | 29.023256 | 67 |
py
|
airflow
|
airflow-main/airflow/migrations/versions/0046_1_10_5_change_datetime_to_datetime2_6_on_mssql_.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""change datetime to datetime2(6) on MSSQL tables.
Revision ID: 74effc47d867
Revises: 6e96a59344a4
Create Date: 2019-08-01 15:19:57.585620
"""
from __future__ import annotations
from collections import defaultdict
from alembic import op
from sqlalchemy import text
from sqlalchemy.dialects import mssql
# revision identifiers, used by Alembic.
revision = "74effc47d867"
down_revision = "6e96a59344a4"
branch_labels = None
depends_on = None
airflow_version = "1.10.5"
def upgrade():
"""Change datetime to datetime2(6) when using MSSQL as backend."""
conn = op.get_bind()
if conn.dialect.name == "mssql":
result = conn.execute(
text(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
)
).fetchone()
mssql_version = result[0]
if mssql_version in ("2000", "2005"):
return
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.drop_index("idx_task_reschedule_dag_task_date")
task_reschedule_batch_op.drop_constraint("task_reschedule_dag_task_date_fkey", type_="foreignkey")
task_reschedule_batch_op.alter_column(
column_name="execution_date",
type_=mssql.DATETIME2(precision=6),
nullable=False,
)
task_reschedule_batch_op.alter_column(
column_name="start_date", type_=mssql.DATETIME2(precision=6)
)
task_reschedule_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
task_reschedule_batch_op.alter_column(
column_name="reschedule_date", type_=mssql.DATETIME2(precision=6)
)
with op.batch_alter_table("task_instance") as task_instance_batch_op:
task_instance_batch_op.drop_index("ti_state_lkp")
task_instance_batch_op.drop_index("ti_dag_date")
modify_execution_date_with_constraint(
conn, task_instance_batch_op, "task_instance", mssql.DATETIME2(precision=6), False
)
task_instance_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.alter_column(column_name="queued_dttm", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.create_index(
"ti_state_lkp", ["dag_id", "task_id", "execution_date"], unique=False
)
task_instance_batch_op.create_index("ti_dag_date", ["dag_id", "execution_date"], unique=False)
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.create_foreign_key(
"task_reschedule_dag_task_date_fkey",
"task_instance",
["task_id", "dag_id", "execution_date"],
["task_id", "dag_id", "execution_date"],
ondelete="CASCADE",
)
task_reschedule_batch_op.create_index(
"idx_task_reschedule_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False
)
with op.batch_alter_table("dag_run") as dag_run_batch_op:
modify_execution_date_with_constraint(
conn, dag_run_batch_op, "dag_run", mssql.DATETIME2(precision=6), None
)
dag_run_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME2(precision=6))
dag_run_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="log", column_name="execution_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="log", column_name="dttm", type_=mssql.DATETIME2(precision=6))
with op.batch_alter_table("sla_miss") as sla_miss_batch_op:
modify_execution_date_with_constraint(
conn, sla_miss_batch_op, "sla_miss", mssql.DATETIME2(precision=6), False
)
sla_miss_batch_op.alter_column(column_name="timestamp", type_=mssql.DATETIME2(precision=6))
op.drop_index("idx_task_fail_dag_task_date", table_name="task_fail")
op.alter_column(
table_name="task_fail", column_name="execution_date", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(table_name="task_fail", column_name="start_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="task_fail", column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.create_index(
"idx_task_fail_dag_task_date", "task_fail", ["dag_id", "task_id", "execution_date"], unique=False
)
op.drop_index("idx_xcom_dag_task_date", table_name="xcom")
op.alter_column(table_name="xcom", column_name="execution_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="xcom", column_name="timestamp", type_=mssql.DATETIME2(precision=6))
op.create_index(
"idx_xcom_dag_task_date", "xcom", ["dag_id", "task_id", "execution_date"], unique=False
)
op.alter_column(
table_name="dag", column_name="last_scheduler_run", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="dag", column_name="last_expired", type_=mssql.DATETIME2(precision=6))
op.alter_column(
table_name="dag_pickle", column_name="created_dttm", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(
table_name="import_error", column_name="timestamp", type_=mssql.DATETIME2(precision=6)
)
op.drop_index("job_type_heart", table_name="job")
op.drop_index("idx_job_state_heartbeat", table_name="job")
op.alter_column(table_name="job", column_name="start_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="job", column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="job", column_name="latest_heartbeat", type_=mssql.DATETIME2(precision=6))
op.create_index("idx_job_state_heartbeat", "job", ["state", "latest_heartbeat"], unique=False)
op.create_index("job_type_heart", "job", ["job_type", "latest_heartbeat"], unique=False)
def downgrade():
"""Change datetime2(6) back to datetime."""
conn = op.get_bind()
if conn.dialect.name == "mssql":
result = conn.execute(
text(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
)
).fetchone()
mssql_version = result[0]
if mssql_version in ("2000", "2005"):
return
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.drop_index("idx_task_reschedule_dag_task_date")
task_reschedule_batch_op.drop_constraint("task_reschedule_dag_task_date_fkey", type_="foreignkey")
task_reschedule_batch_op.alter_column(
column_name="execution_date", type_=mssql.DATETIME, nullable=False
)
task_reschedule_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
task_reschedule_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
task_reschedule_batch_op.alter_column(column_name="reschedule_date", type_=mssql.DATETIME)
with op.batch_alter_table("task_instance") as task_instance_batch_op:
task_instance_batch_op.drop_index("ti_state_lkp")
task_instance_batch_op.drop_index("ti_dag_date")
modify_execution_date_with_constraint(
conn, task_instance_batch_op, "task_instance", mssql.DATETIME, False
)
task_instance_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
task_instance_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
task_instance_batch_op.alter_column(column_name="queued_dttm", type_=mssql.DATETIME)
task_instance_batch_op.create_index(
"ti_state_lkp", ["dag_id", "task_id", "execution_date"], unique=False
)
task_instance_batch_op.create_index("ti_dag_date", ["dag_id", "execution_date"], unique=False)
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.create_foreign_key(
"task_reschedule_dag_task_date_fkey",
"task_instance",
["task_id", "dag_id", "execution_date"],
["task_id", "dag_id", "execution_date"],
ondelete="CASCADE",
)
task_reschedule_batch_op.create_index(
"idx_task_reschedule_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False
)
with op.batch_alter_table("dag_run") as dag_run_batch_op:
modify_execution_date_with_constraint(conn, dag_run_batch_op, "dag_run", mssql.DATETIME, None)
dag_run_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
dag_run_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
op.alter_column(table_name="log", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="log", column_name="dttm", type_=mssql.DATETIME)
with op.batch_alter_table("sla_miss") as sla_miss_batch_op:
modify_execution_date_with_constraint(conn, sla_miss_batch_op, "sla_miss", mssql.DATETIME, False)
sla_miss_batch_op.alter_column(column_name="timestamp", type_=mssql.DATETIME)
op.drop_index("idx_task_fail_dag_task_date", table_name="task_fail")
op.alter_column(table_name="task_fail", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="task_fail", column_name="start_date", type_=mssql.DATETIME)
op.alter_column(table_name="task_fail", column_name="end_date", type_=mssql.DATETIME)
op.create_index(
"idx_task_fail_dag_task_date", "task_fail", ["dag_id", "task_id", "execution_date"], unique=False
)
op.drop_index("idx_xcom_dag_task_date", table_name="xcom")
op.alter_column(table_name="xcom", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="xcom", column_name="timestamp", type_=mssql.DATETIME)
op.create_index(
"idx_xcom_dag_task_date", "xcom", ["dag_id", "task_ild", "execution_date"], unique=False
)
op.alter_column(table_name="dag", column_name="last_scheduler_run", type_=mssql.DATETIME)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mssql.DATETIME)
op.alter_column(table_name="dag", column_name="last_expired", type_=mssql.DATETIME)
op.alter_column(table_name="dag_pickle", column_name="created_dttm", type_=mssql.DATETIME)
op.alter_column(table_name="import_error", column_name="timestamp", type_=mssql.DATETIME)
op.drop_index("job_type_heart", table_name="job")
op.drop_index("idx_job_state_heartbeat", table_name="job")
op.alter_column(table_name="job", column_name="start_date", type_=mssql.DATETIME)
op.alter_column(table_name="job", column_name="end_date", type_=mssql.DATETIME)
op.alter_column(table_name="job", column_name="latest_heartbeat", type_=mssql.DATETIME)
op.create_index("idx_job_state_heartbeat", "job", ["state", "latest_heartbeat"], unique=False)
op.create_index("job_type_heart", "job", ["job_type", "latest_heartbeat"], unique=False)
def get_table_constraints(conn, table_name) -> dict[tuple[str, str], list[str]]:
"""Return primary and unique constraint along with column name.
    This function returns primary and unique constraints
    along with the column names. Some tables, like task_instance,
    are missing an explicit primary key constraint name and the
    name is auto-generated by SQL Server, so this function helps
    retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary of ((constraint name, constraint type), column name) of table
"""
query = text(
f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
WHERE tc.TABLE_NAME = '{table_name}' AND
(tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE')
"""
)
result = conn.execute(query).fetchall()
constraint_dict = defaultdict(list)
for constraint, constraint_type, column in result:
constraint_dict[(constraint, constraint_type)].append(column)
return constraint_dict
def reorder_columns(columns):
"""Reorder the columns for creating constraint.
Preserve primary key ordering
``['task_id', 'dag_id', 'execution_date']``
:param columns: columns retrieved from DB related to constraint
    :return: ordered list of columns
"""
ordered_columns = []
for column in ["task_id", "dag_id", "execution_date"]:
if column in columns:
ordered_columns.append(column)
for column in columns:
if column not in ["task_id", "dag_id", "execution_date"]:
ordered_columns.append(column)
return ordered_columns
def drop_constraint(operator, constraint_dict):
"""Drop a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if "execution_date" in columns:
if constraint[1].lower().startswith("primary"):
operator.drop_constraint(constraint[0], type_="primary")
elif constraint[1].lower().startswith("unique"):
operator.drop_constraint(constraint[0], type_="unique")
def create_constraint(operator, constraint_dict):
"""Create a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if "execution_date" in columns:
if constraint[1].lower().startswith("primary"):
operator.create_primary_key(constraint_name=constraint[0], columns=reorder_columns(columns))
elif constraint[1].lower().startswith("unique"):
operator.create_unique_constraint(
constraint_name=constraint[0], columns=reorder_columns(columns)
)
def modify_execution_date_with_constraint(conn, batch_operator, table_name, type_, nullable) -> None:
"""Change type of column execution_date.
    This helper changes the type of the execution_date column by dropping
    and recreating any primary/unique constraint associated with the column.
    :param conn: sql connection object
    :param batch_operator: batch_alter_table for the table
    :param table_name: table name
    :param type_: DB column type
    :param nullable: whether the column should be nullable (boolean)
"""
constraint_dict = get_table_constraints(conn, table_name)
drop_constraint(batch_operator, constraint_dict)
batch_operator.alter_column(
column_name="execution_date",
type_=type_,
nullable=nullable,
)
create_constraint(batch_operator, constraint_dict)
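# A minimal usage sketch, assuming a SQL Server connection inside an upgrade() step;
# the target type (DATETIME2 with precision 6) is an assumption for illustration and
# is not taken from this migration:
#
#     conn = op.get_bind()
#     with op.batch_alter_table("task_instance") as batch_op:
#         modify_execution_date_with_constraint(
#             conn, batch_op, "task_instance", mssql.DATETIME2(precision=6), False
#         )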
| 17,089 | 48.109195 | 110 | py |
| airflow | airflow-main/airflow/migrations/versions/0116_2_4_0_add_dag_owner_attributes_table.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add dag_owner_attributes table
Revision ID: 1486deb605b4
Revises: f4ff391becb5
Create Date: 2022-08-04 16:59:45.406589
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.migrations.db_types import StringID
# revision identifiers, used by Alembic.
revision = "1486deb605b4"
down_revision = "f4ff391becb5"
branch_labels = None
depends_on = None
airflow_version = "2.4.0"
def upgrade():
"""Apply Add ``DagOwnerAttributes`` table"""
op.create_table(
"dag_owner_attributes",
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("owner", sa.String(length=500), nullable=False),
sa.Column("link", sa.String(length=500), nullable=False),
sa.ForeignKeyConstraint(["dag_id"], ["dag.dag_id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("dag_id", "owner"),
)
def downgrade():
"""Unapply Add Dataset model"""
op.drop_table("dag_owner_attributes")
| 1,757 | 30.963636 | 80 | py |
| airflow | airflow-main/airflow/migrations/versions/0008_1_6_0_task_duration.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Change ``task_instance.task_duration`` type to ``FLOAT``
Revision ID: 2e541a1dcfed
Revises: 1b38cef5b76e
Create Date: 2015-10-28 20:38:41.266143
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "2e541a1dcfed"
down_revision = "1b38cef5b76e"
branch_labels = None
depends_on = None
airflow_version = "1.6.0"
def upgrade():
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column(
"duration",
existing_type=mysql.INTEGER(display_width=11),
type_=sa.Float(),
existing_nullable=True,
)
def downgrade():
pass
| 1,578 | 29.365385 | 62 | py |
| airflow | airflow-main/airflow/migrations/versions/0105_2_3_0_add_map_index_to_taskfail.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add map_index to TaskFail
Drop index idx_task_fail_dag_task_date
Add run_id and map_index
Drop execution_date
Add FK `task_fail_ti_fkey`: TF -> TI ([dag_id, task_id, run_id, map_index])
Revision ID: 48925b2719cb
Revises: 4eaab2fe6582
Create Date: 2022-03-14 10:31:11.220720
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import ColumnElement, Update, and_, select
from airflow.migrations.db_types import TIMESTAMP, StringID
# revision identifiers, used by Alembic.
revision = "48925b2719cb"
down_revision = "4eaab2fe6582"
branch_labels = None
depends_on = None
airflow_version = "2.3.0"
ID_LEN = 250
def tables():
global task_instance, task_fail, dag_run
metadata = sa.MetaData()
task_instance = sa.Table(
"task_instance",
metadata,
sa.Column("task_id", StringID()),
sa.Column("dag_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("map_index", sa.Integer(), server_default="-1"),
sa.Column("execution_date", TIMESTAMP),
)
task_fail = sa.Table(
"task_fail",
metadata,
sa.Column("dag_id", StringID()),
sa.Column("task_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("map_index", StringID()),
sa.Column("execution_date", TIMESTAMP),
)
dag_run = sa.Table(
"dag_run",
metadata,
sa.Column("dag_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("execution_date", TIMESTAMP),
)
def _update_value_from_dag_run(
dialect_name: str,
target_table: sa.Table,
target_column: ColumnElement,
join_columns: list[str],
) -> Update:
"""
Grabs a value from the source table ``dag_run`` and updates target with this value.
    :param dialect_name: dialect in use
    :param target_table: the table to update
    :param target_column: the column to update
    :param join_columns: columns used to join ``dag_run`` to the target table
    """
# for run_id: dag_id, execution_date
# otherwise: dag_id, run_id
condition_list = [getattr(dag_run.c, x) == getattr(target_table.c, x) for x in join_columns]
condition = and_(*condition_list)
if dialect_name == "sqlite":
# Most SQLite versions don't support multi table update (and SQLA doesn't know about it anyway), so we
# need to do a Correlated subquery update
sub_q = select(dag_run.c[target_column.name]).where(condition)
return target_table.update().values({target_column: sub_q})
else:
return target_table.update().where(condition).values({target_column: dag_run.c[target_column.name]})
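# Illustrative approximation of what the two branches emit when back-filling run_id on
# task_fail (the SQL text is a sketch, not the exact dialect rendering):
#
#   sqlite (correlated subquery):
#     UPDATE task_fail SET run_id = (
#         SELECT dag_run.run_id FROM dag_run
#         WHERE dag_run.dag_id = task_fail.dag_id
#           AND dag_run.execution_date = task_fail.execution_date)
#   other dialects: a multi-table UPDATE joining task_fail to dag_run on the same columns.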
def upgrade():
tables()
dialect_name = op.get_bind().dialect.name
op.drop_index("idx_task_fail_dag_task_date", table_name="task_fail")
with op.batch_alter_table("task_fail") as batch_op:
batch_op.add_column(sa.Column("map_index", sa.Integer(), server_default="-1", nullable=False))
batch_op.add_column(sa.Column("run_id", type_=StringID(), nullable=True))
update_query = _update_value_from_dag_run(
dialect_name=dialect_name,
target_table=task_fail,
target_column=task_fail.c.run_id,
join_columns=["dag_id", "execution_date"],
)
op.execute(update_query)
with op.batch_alter_table("task_fail") as batch_op:
batch_op.alter_column("run_id", existing_type=StringID(), existing_nullable=True, nullable=False)
batch_op.drop_column("execution_date")
batch_op.create_foreign_key(
"task_fail_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id", "map_index"],
["dag_id", "task_id", "run_id", "map_index"],
ondelete="CASCADE",
)
def downgrade():
tables()
dialect_name = op.get_bind().dialect.name
op.add_column("task_fail", sa.Column("execution_date", TIMESTAMP, nullable=True))
update_query = _update_value_from_dag_run(
dialect_name=dialect_name,
target_table=task_fail,
target_column=task_fail.c.execution_date,
join_columns=["dag_id", "run_id"],
)
op.execute(update_query)
with op.batch_alter_table("task_fail") as batch_op:
batch_op.alter_column("execution_date", existing_type=TIMESTAMP, nullable=False)
if dialect_name != "sqlite":
batch_op.drop_constraint("task_fail_ti_fkey", type_="foreignkey")
batch_op.drop_column("map_index", mssql_drop_default=True)
batch_op.drop_column("run_id")
op.create_index(
index_name="idx_task_fail_dag_task_date",
table_name="task_fail",
columns=["dag_id", "task_id", "execution_date"],
unique=False,
)
| 5,470 | 33.847134 | 110 | py |
| airflow | airflow-main/airflow/migrations/versions/0075_2_0_0_add_description_field_to_connection.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add description field to ``connection`` table
Revision ID: 61ec73d9401f
Revises: 2c6edca13270
Create Date: 2020-09-10 14:56:30.279248
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "61ec73d9401f"
down_revision = "2c6edca13270"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
"""Apply Add description field to ``connection`` table"""
conn = op.get_bind()
with op.batch_alter_table("connection") as batch_op:
if conn.dialect.name == "mysql":
# Handles case where on mysql with utf8mb4 this would exceed the size of row
# We have to set text type in this migration even if originally it was string
# This is permanently fixed in the follow-up migration 64a7d6477aae
batch_op.add_column(sa.Column("description", sa.Text(length=5000), nullable=True))
else:
batch_op.add_column(sa.Column("description", sa.String(length=5000), nullable=True))
def downgrade():
"""Unapply Add description field to ``connection`` table"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.drop_column("description")
| 2,042 | 35.482143 | 96 | py |
| airflow | airflow-main/airflow/migrations/versions/0098_2_3_0_added_timetable_description_column.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``timetable_description`` column to DagModel for UI.
Revision ID: 786e3737b18f
Revises: 5e3ec427fdd3
Create Date: 2021-10-15 13:33:04.754052
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "786e3737b18f"
down_revision = "5e3ec427fdd3"
branch_labels = None
depends_on = None
airflow_version = "2.3.0"
def upgrade():
"""Apply Add ``timetable_description`` column to DagModel for UI."""
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.add_column(sa.Column("timetable_description", sa.String(length=1000), nullable=True))
def downgrade():
"""Unapply Add ``timetable_description`` column to DagModel for UI."""
is_sqlite = bool(op.get_bind().dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table("dag") as batch_op:
batch_op.drop_column("timetable_description")
if is_sqlite:
op.execute("PRAGMA foreign_keys=on")
| 1,830 | 33.54717 | 102 | py |
| airflow | airflow-main/airflow/migrations/versions/0076_2_0_0_fix_description_field_in_connection_to_.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Fix description field in ``connection`` to be ``text``
Revision ID: 64a7d6477aae
Revises: 61ec73d9401f
Create Date: 2020-11-25 08:56:11.866607
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "64a7d6477aae"
down_revision = "61ec73d9401f"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
"""Apply Fix description field in ``connection`` to be ``text``"""
conn = op.get_bind()
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
"connection",
"description",
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column("connection", "description", existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
"""Unapply Fix description field in ``connection`` to be ``text``"""
conn = op.get_bind()
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
"connection",
"description",
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
"connection",
"description",
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)
| 2,596 | 31.4625 | 107 | py |
| airflow | airflow-main/airflow/migrations/versions/0121_2_5_0_add_dagrunnote_and_taskinstancenote.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add DagRunNote and TaskInstanceNote
Revision ID: 1986afd32c1b
Revises: ee8d93fcc81e
Create Date: 2022-11-22 21:49:05.843439
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.migrations.db_types import StringID
from airflow.utils.sqlalchemy import UtcDateTime
# revision identifiers, used by Alembic.
revision = "1986afd32c1b"
down_revision = "ee8d93fcc81e"
branch_labels = None
depends_on = None
airflow_version = "2.5.0"
def upgrade():
"""Apply Add DagRunNote and TaskInstanceNote"""
op.create_table(
"dag_run_note",
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("dag_run_id", sa.Integer(), nullable=False),
sa.Column(
"content", sa.String(length=1000).with_variant(sa.Text(length=1000), "mysql"), nullable=True
),
sa.Column("created_at", UtcDateTime(timezone=True), nullable=False),
sa.Column("updated_at", UtcDateTime(timezone=True), nullable=False),
sa.ForeignKeyConstraint(
("dag_run_id",), ["dag_run.id"], name="dag_run_note_dr_fkey", ondelete="CASCADE"
),
sa.ForeignKeyConstraint(("user_id",), ["ab_user.id"], name="dag_run_note_user_fkey"),
sa.PrimaryKeyConstraint("dag_run_id", name=op.f("dag_run_note_pkey")),
)
op.create_table(
"task_instance_note",
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("task_id", StringID(), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("run_id", StringID(), nullable=False),
sa.Column("map_index", sa.Integer(), nullable=False),
sa.Column(
"content", sa.String(length=1000).with_variant(sa.Text(length=1000), "mysql"), nullable=True
),
sa.Column("created_at", UtcDateTime(timezone=True), nullable=False),
sa.Column("updated_at", UtcDateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint(
"task_id", "dag_id", "run_id", "map_index", name=op.f("task_instance_note_pkey")
),
sa.ForeignKeyConstraint(
("dag_id", "task_id", "run_id", "map_index"),
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_instance_note_ti_fkey",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(("user_id",), ["ab_user.id"], name="task_instance_note_user_fkey"),
)
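# The ``with_variant`` calls above keep a single column definition while letting MySQL
# use a bounded TEXT type. A standalone sketch of the same pattern (table and column
# names here are illustrative, not additional migration code):
#
#     content_type = sa.String(length=1000).with_variant(sa.Text(length=1000), "mysql")
#     op.add_column("some_table", sa.Column("content", content_type, nullable=True))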
def downgrade():
"""Unapply Add DagRunNote and TaskInstanceNote"""
op.drop_table("task_instance_note")
op.drop_table("dag_run_note")
| 3,507 | 35.926316 | 104 | py |
| airflow | airflow-main/airflow/migrations/versions/0036_1_10_2_add_index_to_taskinstance.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add index to ``task_instance`` table
Revision ID: bf00311e1990
Revises: dd25f486b8ea
Create Date: 2018-09-12 09:53:52.007433
"""
from __future__ import annotations
from alembic import op
# revision identifiers, used by Alembic.
revision = "bf00311e1990"
down_revision = "dd25f486b8ea"
branch_labels = None
depends_on = None
airflow_version = "1.10.2"
def upgrade():
op.create_index("ti_dag_date", "task_instance", ["dag_id", "execution_date"], unique=False)
def downgrade():
op.drop_index("ti_dag_date", table_name="task_instance")
| 1,338 | 30.139535 | 95 | py |
| airflow | airflow-main/airflow/migrations/versions/0104_2_3_0_migrate_rtif_to_use_run_id_and_map_index.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Migrate RTIF to use run_id and map_index
Revision ID: 4eaab2fe6582
Revises: c97c2ab6aa23
Create Date: 2022-03-03 17:48:29.955821
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import and_, select
from sqlalchemy.sql.schema import ForeignKeyConstraint
from airflow.migrations.db_types import TIMESTAMP, StringID
from airflow.migrations.utils import get_mssql_table_constraints
ID_LEN = 250
# revision identifiers, used by Alembic.
revision = "4eaab2fe6582"
down_revision = "c97c2ab6aa23"
branch_labels = None
depends_on = None
airflow_version = "2.3.0"
# Just Enough Table to run the conditions for update.
def tables(for_downgrade=False):
import sqlalchemy_jsonfield
global task_instance, rendered_task_instance_fields, dag_run
metadata = sa.MetaData()
task_instance = sa.Table(
"task_instance",
metadata,
sa.Column("task_id", StringID()),
sa.Column("dag_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("execution_date", TIMESTAMP),
)
rendered_task_instance_fields = sa.Table(
"rendered_task_instance_fields",
metadata,
sa.Column("dag_id", StringID()),
sa.Column("task_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("execution_date", TIMESTAMP),
sa.Column("rendered_fields", sqlalchemy_jsonfield.JSONField(), nullable=False),
sa.Column("k8s_pod_yaml", sqlalchemy_jsonfield.JSONField(), nullable=True),
)
if for_downgrade:
rendered_task_instance_fields.append_column(
sa.Column("map_index", sa.Integer(), server_default="-1"),
)
rendered_task_instance_fields.append_constraint(
ForeignKeyConstraint(
["dag_id", "run_id"],
["dag_run.dag_id", "dag_run.run_id"],
name="rtif_dag_run_fkey",
ondelete="CASCADE",
),
)
dag_run = sa.Table(
"dag_run",
metadata,
sa.Column("dag_id", StringID()),
sa.Column("run_id", StringID()),
sa.Column("execution_date", TIMESTAMP),
)
def _multi_table_update(dialect_name, target, column):
condition = dag_run.c.dag_id == target.c.dag_id
if column == target.c.run_id:
condition = and_(condition, dag_run.c.execution_date == target.c.execution_date)
else:
condition = and_(condition, dag_run.c.run_id == target.c.run_id)
if dialect_name == "sqlite":
# Most SQLite versions don't support multi table update (and SQLA doesn't know about it anyway), so we
# need to do a Correlated subquery update
sub_q = select(dag_run.c[column.name]).where(condition)
return target.update().values({column: sub_q})
else:
return target.update().where(condition).values({column: dag_run.c[column.name]})
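# Note on the join condition above (illustrative summary, not extra migration logic):
# when the column being back-filled is run_id, rows are matched on dag_id +
# execution_date; for any other column (execution_date on downgrade) the match is on
# dag_id + run_id. A typical call looks like:
#
#     _multi_table_update(dialect_name, rendered_task_instance_fields,
#                         rendered_task_instance_fields.c.run_id)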
def upgrade():
tables()
dialect_name = op.get_bind().dialect.name
with op.batch_alter_table("rendered_task_instance_fields") as batch_op:
batch_op.add_column(sa.Column("map_index", sa.Integer(), server_default="-1", nullable=False))
rendered_task_instance_fields.append_column(
sa.Column("map_index", sa.Integer(), server_default="-1", nullable=False)
)
batch_op.add_column(sa.Column("run_id", type_=StringID(), nullable=True))
update_query = _multi_table_update(
dialect_name, rendered_task_instance_fields, rendered_task_instance_fields.c.run_id
)
op.execute(update_query)
with op.batch_alter_table(
"rendered_task_instance_fields", copy_from=rendered_task_instance_fields
) as batch_op:
if dialect_name == "mssql":
constraints = get_mssql_table_constraints(op.get_bind(), "rendered_task_instance_fields")
pk, _ = constraints["PRIMARY KEY"].popitem()
batch_op.drop_constraint(pk, type_="primary")
elif dialect_name != "sqlite":
batch_op.drop_constraint("rendered_task_instance_fields_pkey", type_="primary")
batch_op.alter_column("run_id", existing_type=StringID(), existing_nullable=True, nullable=False)
batch_op.drop_column("execution_date")
batch_op.create_primary_key(
"rendered_task_instance_fields_pkey", ["dag_id", "task_id", "run_id", "map_index"]
)
batch_op.create_foreign_key(
"rtif_ti_fkey",
"task_instance",
["dag_id", "task_id", "run_id", "map_index"],
["dag_id", "task_id", "run_id", "map_index"],
ondelete="CASCADE",
)
def downgrade():
tables(for_downgrade=True)
dialect_name = op.get_bind().dialect.name
op.add_column("rendered_task_instance_fields", sa.Column("execution_date", TIMESTAMP, nullable=True))
update_query = _multi_table_update(
dialect_name, rendered_task_instance_fields, rendered_task_instance_fields.c.execution_date
)
op.execute(update_query)
with op.batch_alter_table(
"rendered_task_instance_fields", copy_from=rendered_task_instance_fields
) as batch_op:
batch_op.alter_column("execution_date", existing_type=TIMESTAMP, nullable=False)
if dialect_name != "sqlite":
batch_op.drop_constraint("rtif_ti_fkey", type_="foreignkey")
batch_op.drop_constraint("rendered_task_instance_fields_pkey", type_="primary")
batch_op.create_primary_key(
"rendered_task_instance_fields_pkey", ["dag_id", "task_id", "execution_date"]
)
batch_op.drop_column("map_index", mssql_drop_default=True)
batch_op.drop_column("run_id")
| 6,460 | 37.230769 | 110 | py |
| airflow | airflow-main/airflow/migrations/versions/0049_1_10_7_merge_heads.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Straighten out the migrations
Revision ID: 08364691d074
Revises: a56c9515abdc, 004c1210f153, 74effc47d867, b3b105409875
Create Date: 2019-11-19 22:05:11.752222
"""
from __future__ import annotations
# revision identifiers, used by Alembic.
revision = "08364691d074"
down_revision = ("a56c9515abdc", "004c1210f153", "74effc47d867", "b3b105409875")
branch_labels = None
depends_on = None
airflow_version = "1.10.7"
def upgrade():
pass
def downgrade():
pass
| 1,260 | 29.756098 | 80 | py |
| airflow | airflow-main/airflow/migrations/versions/0071_2_0_0_add_job_id_to_dagrun_table.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``creating_job_id`` to ``DagRun`` table
Revision ID: 364159666cbd
Revises: 52d53670a240
Create Date: 2020-10-10 09:08:07.332456
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "364159666cbd"
down_revision = "52d53670a240"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
"""Apply Add ``creating_job_id`` to ``DagRun`` table"""
op.add_column("dag_run", sa.Column("creating_job_id", sa.Integer))
def downgrade():
"""Unapply Add job_id to DagRun table"""
op.drop_column("dag_run", "creating_job_id")
| 1,436 | 30.23913 | 70 | py |
| airflow | airflow-main/airflow/migrations/versions/0095_2_2_4_add_session_table_to_db.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create a ``session`` table to store web session data
Revision ID: c381b21cb7e4
Revises: be2bfac3da23
Create Date: 2022-01-25 13:56:35.069429
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "c381b21cb7e4"
down_revision = "be2bfac3da23"
branch_labels = None
depends_on = None
airflow_version = "2.2.4"
TABLE_NAME = "session"
def upgrade():
"""Apply Create a ``session`` table to store web session data"""
op.create_table(
TABLE_NAME,
sa.Column("id", sa.Integer()),
sa.Column("session_id", sa.String(255)),
sa.Column("data", sa.LargeBinary()),
sa.Column("expiry", sa.DateTime()),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("session_id"),
)
def downgrade():
"""Unapply Create a ``session`` table to store web session data"""
op.drop_table(TABLE_NAME)
| 1,720 | 29.732143 | 70 | py |
| airflow | airflow-main/airflow/migrations/versions/0096_2_2_4_adding_index_for_dag_id_in_job.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add index for ``dag_id`` column in ``job`` table.
Revision ID: 587bdf053233
Revises: c381b21cb7e4
Create Date: 2021-12-14 10:20:12.482940
"""
from __future__ import annotations
from alembic import op
# revision identifiers, used by Alembic.
revision = "587bdf053233"
down_revision = "c381b21cb7e4"
branch_labels = None
depends_on = None
airflow_version = "2.2.4"
def upgrade():
"""Apply Add index for ``dag_id`` column in ``job`` table."""
op.create_index("idx_job_dag_id", "job", ["dag_id"], unique=False)
def downgrade():
"""Unapply Add index for ``dag_id`` column in ``job`` table."""
op.drop_index("idx_job_dag_id", table_name="job")
| 1,452 | 31.288889 | 70 | py |
| airflow | airflow-main/airflow/migrations/versions/0007_1_6_0_add_dagrun.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``dag_run`` table
Revision ID: 1b38cef5b76e
Revises: 502898887f84
Create Date: 2015-10-27 08:31:48.475140
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.migrations.db_types import StringID
# revision identifiers, used by Alembic.
revision = "1b38cef5b76e"
down_revision = "502898887f84"
branch_labels = None
depends_on = None
airflow_version = "1.6.0"
def upgrade():
op.create_table(
"dag_run",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("dag_id", StringID(), nullable=True),
sa.Column("execution_date", sa.DateTime(), nullable=True),
sa.Column("state", sa.String(length=50), nullable=True),
sa.Column("run_id", StringID(), nullable=True),
sa.Column("external_trigger", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("dag_id", "execution_date"),
sa.UniqueConstraint("dag_id", "run_id"),
)
def downgrade():
op.drop_table("dag_run")
| 1,825 | 31.035088 | 67 | py |
| airflow | airflow-main/airflow/migrations/versions/0064_2_0_0_add_unique_constraint_to_conn_id.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add unique constraint to ``conn_id``
Revision ID: 8d48763f6d53
Revises: 8f966b9c467a
Create Date: 2020-05-03 16:55:01.834231
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.models.base import COLLATION_ARGS
# revision identifiers, used by Alembic.
revision = "8d48763f6d53"
down_revision = "8f966b9c467a"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
"""Apply Add unique constraint to ``conn_id`` and set it as non-nullable"""
try:
with op.batch_alter_table("connection") as batch_op:
batch_op.alter_column("conn_id", nullable=False, existing_type=sa.String(250, **COLLATION_ARGS))
batch_op.create_unique_constraint(constraint_name="unique_conn_id", columns=["conn_id"])
except sa.exc.IntegrityError:
raise Exception("Make sure there are no duplicate connections with the same conn_id or null values")
def downgrade():
"""Unapply Add unique constraint to ``conn_id`` and set it as non-nullable"""
with op.batch_alter_table("connection") as batch_op:
batch_op.drop_constraint(constraint_name="unique_conn_id", type_="unique")
batch_op.alter_column("conn_id", nullable=True, existing_type=sa.String(250))
| 2,065 | 35.245614 | 108 | py |
| airflow | airflow-main/airflow/migrations/versions/0063_2_0_0_set_conn_type_as_non_nullable.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Set ``conn_type`` as non-nullable
Revision ID: 8f966b9c467a
Revises: 3c20cacc0044
Create Date: 2020-06-08 22:36:34.534121
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
# revision identifiers, used by Alembic.
revision = "8f966b9c467a"
down_revision = "3c20cacc0044"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
"""Apply Set ``conn_type`` as non-nullable"""
Base = declarative_base()
class Connection(Base):
__tablename__ = "connection"
id = sa.Column(sa.Integer(), primary_key=True)
conn_id = sa.Column(sa.String(250))
conn_type = sa.Column(sa.String(500))
# Generate run type for existing records
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
    # imap_default was missing its type, let's fix that up
session.query(Connection).filter_by(conn_id="imap_default", conn_type=None).update(
{Connection.conn_type: "imap"}, synchronize_session=False
)
session.commit()
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=False)
def downgrade():
"""Unapply Set ``conn_type`` as non-nullable"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=True)
| 2,339 | 32.913043 | 96 | py |
| airflow | airflow-main/airflow/migrations/versions/0127_2_7_0_add_custom_operator_name_column.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add custom_operator_name column
Revision ID: 788397e78828
Revises: 937cbd173ca1
Create Date: 2023-06-12 10:46:52.125149
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "788397e78828"
down_revision = "937cbd173ca1"
branch_labels = None
depends_on = None
airflow_version = "2.7.0"
TABLE_NAME = "task_instance"
def upgrade():
"""Apply Add custom_operator_name column"""
with op.batch_alter_table(TABLE_NAME) as batch_op:
batch_op.add_column(sa.Column("custom_operator_name", sa.VARCHAR(length=1000), nullable=True))
def downgrade():
"""Unapply Add custom_operator_name column"""
with op.batch_alter_table(TABLE_NAME) as batch_op:
batch_op.drop_column("custom_operator_name")
| 1,594 | 30.27451 | 102 | py |
| airflow | airflow-main/airflow/migrations/versions/0065_2_0_0_update_schema_for_smart_sensor.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``sensor_instance`` table
Revision ID: e38be357a868
Revises: 8d48763f6d53
Create Date: 2019-06-07 04:03:17.003939
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import func, inspect
from airflow.migrations.db_types import TIMESTAMP, StringID
# revision identifiers, used by Alembic.
revision = "e38be357a868"
down_revision = "8d48763f6d53"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "sensor_instance" in tables:
return
op.create_table(
"sensor_instance",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("task_id", StringID(), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("execution_date", TIMESTAMP, nullable=False),
sa.Column("state", sa.String(length=20), nullable=True),
sa.Column("try_number", sa.Integer(), nullable=True),
sa.Column("start_date", TIMESTAMP, nullable=True),
sa.Column("operator", sa.String(length=1000), nullable=False),
sa.Column("op_classpath", sa.String(length=1000), nullable=False),
sa.Column("hashcode", sa.BigInteger(), nullable=False),
sa.Column("shardcode", sa.Integer(), nullable=False),
sa.Column("poke_context", sa.Text(), nullable=False),
sa.Column("execution_context", sa.Text(), nullable=True),
sa.Column("created_at", TIMESTAMP, default=func.now, nullable=False),
sa.Column("updated_at", TIMESTAMP, default=func.now, nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("ti_primary_key", "sensor_instance", ["dag_id", "task_id", "execution_date"], unique=True)
op.create_index("si_hashcode", "sensor_instance", ["hashcode"], unique=False)
op.create_index("si_shardcode", "sensor_instance", ["shardcode"], unique=False)
op.create_index("si_state_shard", "sensor_instance", ["state", "shardcode"], unique=False)
op.create_index("si_updated_at", "sensor_instance", ["updated_at"], unique=False)
def downgrade():
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "sensor_instance" in tables:
op.drop_table("sensor_instance")
| 3,144 | 37.82716 | 110 | py |
| airflow | airflow-main/airflow/migrations/versions/0113_2_4_0_compare_types_between_orm_and_db.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""compare types between ORM and DB.
Revision ID: 44b7034f6bdc
Revises: 424117c37d18
Create Date: 2022-05-31 09:16:44.558754
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.migrations.db_types import TIMESTAMP
# revision identifiers, used by Alembic.
revision = "44b7034f6bdc"
down_revision = "424117c37d18"
branch_labels = None
depends_on = None
airflow_version = "2.4.0"
def upgrade():
"""Apply compare types between ORM and DB."""
conn = op.get_bind()
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"extra",
existing_type=sa.TEXT(),
type_=sa.Text(),
existing_nullable=True,
)
with op.batch_alter_table("log_template", schema=None) as batch_op:
batch_op.alter_column(
"created_at", existing_type=sa.DateTime(), type_=TIMESTAMP(), existing_nullable=False
)
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
# drop server_default
batch_op.alter_column(
"dag_hash",
existing_type=sa.String(32),
server_default=None,
type_=sa.String(32),
existing_nullable=False,
)
with op.batch_alter_table("trigger", schema=None) as batch_op:
batch_op.alter_column(
"created_date", existing_type=sa.DateTime(), type_=TIMESTAMP(), existing_nullable=False
)
if conn.dialect.name != "sqlite":
return
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
batch_op.alter_column("fileloc_hash", existing_type=sa.Integer, type_=sa.BigInteger())
# Some sqlite date are not in db_types.TIMESTAMP. Convert these to TIMESTAMP.
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.alter_column(
"last_pickled", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"last_expired", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("dag_pickle", schema=None) as batch_op:
batch_op.alter_column(
"created_dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("dag_run", schema=None) as batch_op:
batch_op.alter_column(
"execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False
)
batch_op.alter_column(
"start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("import_error", schema=None) as batch_op:
batch_op.alter_column(
"timestamp", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("job", schema=None) as batch_op:
batch_op.alter_column(
"start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"latest_heartbeat", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("log", schema=None) as batch_op:
batch_op.alter_column("dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True)
batch_op.alter_column(
"execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
batch_op.alter_column(
"last_updated", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False
)
with op.batch_alter_table("sla_miss", schema=None) as batch_op:
batch_op.alter_column(
"execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False
)
batch_op.alter_column(
"timestamp", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("task_fail", schema=None) as batch_op:
batch_op.alter_column(
"start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column(
"start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
batch_op.alter_column(
"queued_dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True
)
def downgrade():
"""Unapply compare types between ORM and DB."""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"extra",
existing_type=sa.Text(),
type_=sa.TEXT(),
existing_nullable=True,
)
with op.batch_alter_table("log_template", schema=None) as batch_op:
batch_op.alter_column(
"created_at", existing_type=TIMESTAMP(), type_=sa.DateTime(), existing_nullable=False
)
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
# add server_default
batch_op.alter_column(
"dag_hash",
existing_type=sa.String(32),
server_default="Hash not calculated yet",
type_=sa.String(32),
existing_nullable=False,
)
with op.batch_alter_table("trigger", schema=None) as batch_op:
batch_op.alter_column(
"created_date", existing_type=TIMESTAMP(), type_=sa.DateTime(), existing_nullable=False
)
conn = op.get_bind()
if conn.dialect.name != "sqlite":
return
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
batch_op.alter_column("fileloc_hash", existing_type=sa.BigInteger, type_=sa.Integer())
# Change these column back to sa.DATETIME()
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column(
"queued_dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
with op.batch_alter_table("task_fail", schema=None) as batch_op:
batch_op.alter_column(
"end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
with op.batch_alter_table("sla_miss", schema=None) as batch_op:
batch_op.alter_column(
"timestamp", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False
)
with op.batch_alter_table("serialized_dag", schema=None) as batch_op:
batch_op.alter_column(
"last_updated", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False
)
with op.batch_alter_table("log", schema=None) as batch_op:
batch_op.alter_column(
"execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column("dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True)
with op.batch_alter_table("job", schema=None) as batch_op:
batch_op.alter_column(
"latest_heartbeat", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
with op.batch_alter_table("import_error", schema=None) as batch_op:
batch_op.alter_column(
"timestamp", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
with op.batch_alter_table("dag_run", schema=None) as batch_op:
batch_op.alter_column(
"end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False
)
with op.batch_alter_table("dag_pickle", schema=None) as batch_op:
batch_op.alter_column(
"created_dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.alter_column(
"last_expired", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
batch_op.alter_column(
"last_pickled", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True
)
| 10,819 | 40.140684 | 109 | py |
| airflow | airflow-main/airflow/migrations/versions/0047_1_10_4_increase_queue_name_size_limit.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Increase queue name size limit
Revision ID: 004c1210f153
Revises: 939bb1e647c8
Create Date: 2019-06-07 07:46:04.262275
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "004c1210f153"
down_revision = "939bb1e647c8"
branch_labels = None
depends_on = None
airflow_version = "1.10.4"
def upgrade():
"""
    Increase column size from 50 to 256 characters, closing AIRFLOW-4737, which was
    caused by broker backends that might use unusually large queue names.
"""
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column("queue", type_=sa.String(256))
def downgrade():
"""Revert column size from 256 to 50 characters, might result in data loss."""
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column("queue", type_=sa.String(50))
| 1,800 | 32.981132 | 82 | py |
| airflow | airflow-main/airflow/migrations/versions/0001_1_5_0_current_schema.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""current schema
Revision ID: e3a246e0dc1
Revises:
Create Date: 2015-08-18 16:35:00.883495
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from sqlalchemy import func, inspect
from airflow.migrations.db_types import StringID
# revision identifiers, used by Alembic.
revision = "e3a246e0dc1"
down_revision = None
branch_labels = None
depends_on = None
airflow_version = "1.5.0"
def upgrade():
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "connection" not in tables:
op.create_table(
"connection",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("conn_id", StringID(), nullable=True),
sa.Column("conn_type", sa.String(length=500), nullable=True),
sa.Column("host", sa.String(length=500), nullable=True),
sa.Column("schema", sa.String(length=500), nullable=True),
sa.Column("login", sa.String(length=500), nullable=True),
sa.Column("password", sa.String(length=500), nullable=True),
sa.Column("port", sa.Integer(), nullable=True),
sa.Column("extra", sa.String(length=5000), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
if "dag" not in tables:
op.create_table(
"dag",
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("is_paused", sa.Boolean(), nullable=True),
sa.Column("is_subdag", sa.Boolean(), nullable=True),
sa.Column("is_active", sa.Boolean(), nullable=True),
sa.Column("last_scheduler_run", sa.DateTime(), nullable=True),
sa.Column("last_pickled", sa.DateTime(), nullable=True),
sa.Column("last_expired", sa.DateTime(), nullable=True),
sa.Column("scheduler_lock", sa.Boolean(), nullable=True),
sa.Column("pickle_id", sa.Integer(), nullable=True),
sa.Column("fileloc", sa.String(length=2000), nullable=True),
sa.Column("owners", sa.String(length=2000), nullable=True),
sa.PrimaryKeyConstraint("dag_id"),
)
if "dag_pickle" not in tables:
op.create_table(
"dag_pickle",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("pickle", sa.PickleType(), nullable=True),
sa.Column("created_dttm", sa.DateTime(), nullable=True),
sa.Column("pickle_hash", sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
if "import_error" not in tables:
op.create_table(
"import_error",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("timestamp", sa.DateTime(), nullable=True),
sa.Column("filename", sa.String(length=1024), nullable=True),
sa.Column("stacktrace", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
if "job" not in tables:
op.create_table(
"job",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("dag_id", sa.String(length=250), nullable=True),
sa.Column("state", sa.String(length=20), nullable=True),
sa.Column("job_type", sa.String(length=30), nullable=True),
sa.Column("start_date", sa.DateTime(), nullable=True),
sa.Column("end_date", sa.DateTime(), nullable=True),
sa.Column("latest_heartbeat", sa.DateTime(), nullable=True),
sa.Column("executor_class", sa.String(length=500), nullable=True),
sa.Column("hostname", sa.String(length=500), nullable=True),
sa.Column("unixname", sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("job_type_heart", "job", ["job_type", "latest_heartbeat"], unique=False)
if "log" not in tables:
op.create_table(
"log",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("dttm", sa.DateTime(), nullable=True),
sa.Column("dag_id", StringID(), nullable=True),
sa.Column("task_id", StringID(), nullable=True),
sa.Column("event", sa.String(length=30), nullable=True),
sa.Column("execution_date", sa.DateTime(), nullable=True),
sa.Column("owner", sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
if "sla_miss" not in tables:
op.create_table(
"sla_miss",
sa.Column("task_id", StringID(), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("execution_date", sa.DateTime(), nullable=False),
sa.Column("email_sent", sa.Boolean(), nullable=True),
sa.Column("timestamp", sa.DateTime(), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("task_id", "dag_id", "execution_date"),
)
if "slot_pool" not in tables:
op.create_table(
"slot_pool",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("pool", StringID(length=50), nullable=True),
sa.Column("slots", sa.Integer(), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("pool"),
)
if "task_instance" not in tables:
op.create_table(
"task_instance",
sa.Column("task_id", StringID(), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("execution_date", sa.DateTime(), nullable=False),
sa.Column("start_date", sa.DateTime(), nullable=True),
sa.Column("end_date", sa.DateTime(), nullable=True),
sa.Column("duration", sa.Integer(), nullable=True),
sa.Column("state", sa.String(length=20), nullable=True),
sa.Column("try_number", sa.Integer(), nullable=True),
sa.Column("hostname", sa.String(length=1000), nullable=True),
sa.Column("unixname", sa.String(length=1000), nullable=True),
sa.Column("job_id", sa.Integer(), nullable=True),
sa.Column("pool", sa.String(length=50), nullable=True),
sa.Column("queue", sa.String(length=50), nullable=True),
sa.Column("priority_weight", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("task_id", "dag_id", "execution_date"),
)
op.create_index("ti_dag_state", "task_instance", ["dag_id", "state"], unique=False)
op.create_index("ti_pool", "task_instance", ["pool", "state", "priority_weight"], unique=False)
op.create_index(
"ti_state_lkp", "task_instance", ["dag_id", "task_id", "execution_date", "state"], unique=False
)
if "user" not in tables:
op.create_table(
"user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("username", StringID(), nullable=True),
sa.Column("email", sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
)
if "variable" not in tables:
op.create_table(
"variable",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("key", StringID(), nullable=True),
sa.Column("val", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("key"),
)
if "chart" not in tables:
op.create_table(
"chart",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("label", sa.String(length=200), nullable=True),
sa.Column("conn_id", sa.String(length=250), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("chart_type", sa.String(length=100), nullable=True),
sa.Column("sql_layout", sa.String(length=50), nullable=True),
sa.Column("sql", sa.Text(), nullable=True),
sa.Column("y_log_scale", sa.Boolean(), nullable=True),
sa.Column("show_datatable", sa.Boolean(), nullable=True),
sa.Column("show_sql", sa.Boolean(), nullable=True),
sa.Column("height", sa.Integer(), nullable=True),
sa.Column("default_params", sa.String(length=5000), nullable=True),
sa.Column("x_is_date", sa.Boolean(), nullable=True),
sa.Column("iteration_no", sa.Integer(), nullable=True),
sa.Column("last_modified", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
if "xcom" not in tables:
op.create_table(
"xcom",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("key", StringID(length=512), nullable=True),
sa.Column("value", sa.PickleType(), nullable=True),
sa.Column("timestamp", sa.DateTime(), default=func.now, nullable=False),
sa.Column("execution_date", sa.DateTime(), nullable=False),
sa.Column("task_id", StringID(), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
def downgrade():
op.drop_table("chart")
op.drop_table("variable")
op.drop_table("user")
op.drop_index("ti_state_lkp", table_name="task_instance")
op.drop_index("ti_pool", table_name="task_instance")
op.drop_index("ti_dag_state", table_name="task_instance")
op.drop_table("task_instance")
op.drop_table("slot_pool")
op.drop_table("sla_miss")
op.drop_table("log")
op.drop_index("job_type_heart", table_name="job")
op.drop_table("job")
op.drop_table("import_error")
op.drop_table("dag_pickle")
op.drop_table("dag")
op.drop_table("connection")
op.drop_table("xcom")
| 10,898 | 44.03719 | 107 |
py
|
airflow
|
airflow-main/airflow/compat/functools.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import sys
if sys.version_info >= (3, 9):
from functools import cache
else:
from functools import lru_cache
cache = lru_cache(maxsize=None)
# ``cached_property`` is re-exported here for backwards compatibility: old provider code may
# still import it from this module, although on Python 3.8+ it comes straight from functools.
from functools import cached_property # type: ignore
__all__ = ["cache", "cached_property"]
| 1,223 | 35 | 95 |
py
|
airflow
|
airflow-main/airflow/compat/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/listeners/listener.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
import pluggy
from airflow.plugins_manager import integrate_listener_plugins
if TYPE_CHECKING:
from pluggy._hooks import _HookRelay
log = logging.getLogger(__name__)
_listener_manager: ListenerManager | None = None
class ListenerManager:
"""Manage listener registration and provides hook property for calling them."""
def __init__(self):
from airflow.listeners.spec import dagrun, lifecycle, taskinstance
self.pm = pluggy.PluginManager("airflow")
self.pm.add_hookspecs(lifecycle)
self.pm.add_hookspecs(dagrun)
self.pm.add_hookspecs(taskinstance)
@property
def has_listeners(self) -> bool:
return len(self.pm.get_plugins()) > 0
@property
def hook(self) -> _HookRelay:
"""Return hook, on which plugin methods specified in spec can be called."""
return self.pm.hook
def add_listener(self, listener):
if self.pm.is_registered(listener):
return
self.pm.register(listener)
def clear(self):
"""Remove registered plugins."""
for plugin in self.pm.get_plugins():
self.pm.unregister(plugin)
def get_listener_manager() -> ListenerManager:
"""Get singleton listener manager."""
global _listener_manager
if not _listener_manager:
_listener_manager = ListenerManager()
integrate_listener_plugins(_listener_manager)
return _listener_manager
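# Usage sketch (illustrative, not part of the upstream module; ``_EchoListener`` is a
# hypothetical example): a listener is any object implementing one or more of the hook
# specs registered above, marked with the ``airflow.listeners.hookimpl`` marker.
from airflow.listeners import hookimpl
class _EchoListener:
    @hookimpl
    def on_starting(self, component):
        # Matches the ``lifecycle.on_starting`` spec; called before the component starts.
        log.info("Component %s is starting", component)
    @hookimpl
    def on_task_instance_success(self, previous_state, task_instance, session):
        # Matches the ``taskinstance.on_task_instance_success`` spec.
        log.info("Task %s succeeded (previous state: %s)", task_instance.task_id, previous_state)
def _register_echo_listener() -> None:
    # Registration goes through the singleton manager so plugin and ad-hoc listeners share it.
    get_listener_manager().add_listener(_EchoListener())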
| 2,301 | 30.108108 | 83 |
py
|
airflow
|
airflow-main/airflow/listeners/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from pluggy import HookimplMarker
hookimpl = HookimplMarker("airflow")
| 895 | 37.956522 | 62 |
py
|
airflow
|
airflow-main/airflow/listeners/spec/taskinstance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from pluggy import HookspecMarker
if TYPE_CHECKING:
from sqlalchemy.orm.session import Session
from airflow.models.taskinstance import TaskInstance
from airflow.utils.state import TaskInstanceState
hookspec = HookspecMarker("airflow")
@hookspec
def on_task_instance_running(
previous_state: TaskInstanceState | None, task_instance: TaskInstance, session: Session | None
):
"""Called when task state changes to RUNNING. previous_state can be None."""
@hookspec
def on_task_instance_success(
previous_state: TaskInstanceState | None, task_instance: TaskInstance, session: Session | None
):
"""Called when task state changes to SUCCESS. previous_state can be None."""
@hookspec
def on_task_instance_failed(
previous_state: TaskInstanceState | None, task_instance: TaskInstance, session: Session | None
):
"""Called when task state changes to FAIL. previous_state can be None."""
| 1,778 | 33.211538 | 98 |
py
|
airflow
|
airflow-main/airflow/listeners/spec/lifecycle.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from pluggy import HookspecMarker
hookspec = HookspecMarker("airflow")
@hookspec
def on_starting(component):
"""
    Called before an Airflow component - such as the scheduler, a worker, or a task runner - starts.
    It is guaranteed that this will be called before any other plugin method.
:param component: Component that calls this method
"""
@hookspec
def before_stopping(component):
"""
    Called before an Airflow component - such as the scheduler, a worker, or a task runner - stops.
    It is guaranteed that this will be called after any other plugin method.
:param component: Component that calls this method
"""
| 1,447 | 31.177778 | 89 |
py
|
airflow
|
airflow-main/airflow/listeners/spec/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/listeners/spec/dagrun.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from pluggy import HookspecMarker
if TYPE_CHECKING:
from airflow.models.dagrun import DagRun
hookspec = HookspecMarker("airflow")
@hookspec
def on_dag_run_running(dag_run: DagRun, msg: str):
"""Called when dag run state changes to RUNNING."""
@hookspec
def on_dag_run_success(dag_run: DagRun, msg: str):
"""Called when dag run state changes to SUCCESS."""
@hookspec
def on_dag_run_failed(dag_run: DagRun, msg: str):
"""Called when dag run state changes to FAIL."""
| 1,346 | 30.325581 | 62 |
py
|
airflow
|
airflow-main/airflow/api/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Authentication backend."""
from __future__ import annotations
import logging
from importlib import import_module
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException, AirflowException
log = logging.getLogger(__name__)
def load_auth():
"""Load authentication backends."""
auth_backends = "airflow.api.auth.backend.default"
try:
auth_backends = conf.get("api", "auth_backends")
except AirflowConfigException:
pass
backends = []
for backend in auth_backends.split(","):
try:
auth = import_module(backend.strip())
log.info("Loaded API auth backend: %s", backend)
backends.append(auth)
except ImportError as err:
log.critical("Cannot import %s for API authentication due to: %s", backend, err)
raise AirflowException(err)
return backends
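# Usage sketch (illustrative, not part of the upstream module; the helper below is
# hypothetical): deployments select backends through the ``[api] auth_backends`` option,
# e.g. ``auth_backends = airflow.api.auth.backend.session`` in airflow.cfg, and each
# configured backend module is expected to expose a ``requires_authentication`` decorator.
def _auth_decorators() -> list:
    # Collect the decorator from every configured backend so API views can be wrapped with all of them.
    return [backend.requires_authentication for backend in load_auth()]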
| 1,689 | 34.208333 | 92 |
py
|
airflow
|
airflow-main/airflow/api/common/mark_tasks.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Marks tasks APIs."""
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Collection, Iterable, Iterator, NamedTuple
from sqlalchemy import or_, select
from sqlalchemy.orm import Session as SASession, lazyload
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.models.operator import Operator
from airflow.models.taskinstance import TaskInstance
from airflow.operators.subdag import SubDagOperator
from airflow.utils import timezone
from airflow.utils.helpers import exactly_one
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.types import DagRunType
class _DagRunInfo(NamedTuple):
logical_date: datetime
data_interval: tuple[datetime, datetime]
def _create_dagruns(
dag: DAG,
infos: Iterable[_DagRunInfo],
state: DagRunState,
run_type: DagRunType,
) -> Iterable[DagRun]:
"""Infers from data intervals which DAG runs need to be created and does so.
:param dag: The DAG to create runs for.
:param infos: List of logical dates and data intervals to evaluate.
:param state: The state to set the dag run to
    :param run_type: The type of the new dag runs; its value prefixes the generated dag run id:
        ``{run_type}__{execution_date}``.
:return: Newly created and existing dag runs for the execution dates supplied.
"""
# Find out existing DAG runs that we don't need to create.
dag_runs = {
run.logical_date: run
for run in DagRun.find(dag_id=dag.dag_id, execution_date=[info.logical_date for info in infos])
}
for info in infos:
if info.logical_date in dag_runs:
continue
dag_runs[info.logical_date] = dag.create_dagrun(
execution_date=info.logical_date,
data_interval=info.data_interval,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
run_type=run_type,
)
return dag_runs.values()
@provide_session
def set_state(
*,
tasks: Collection[Operator | tuple[Operator, int]],
run_id: str | None = None,
execution_date: datetime | None = None,
upstream: bool = False,
downstream: bool = False,
future: bool = False,
past: bool = False,
state: TaskInstanceState = TaskInstanceState.SUCCESS,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the state of a task instance and if needed its relatives.
Can set state for future tasks (calculated from run_id) and retroactively
for past tasks. Will verify integrity of past dag runs in order to create
tasks that did not exist. It will not create dag runs that are missing
    on the schedule, although it will create subdag dag runs if needed.
:param tasks: the iterable of tasks or (task, map_index) tuples from which to work.
``task.dag`` needs to be set
:param run_id: the run_id of the dagrun to start looking from
:param execution_date: the execution date from which to start looking (deprecated)
:param upstream: Mark all parents (upstream tasks)
    :param downstream: Mark all descendants (downstream tasks) of task_id, including SubDags
:param future: Mark all future tasks on the interval of the dag up until
last execution date.
:param past: Retroactively mark all tasks starting from start_date of the DAG
:param state: State to which the tasks need to be set
:param commit: Commit tasks to be altered to the database
:param session: database session
:return: list of tasks that have been created and updated
"""
if not tasks:
return []
if not exactly_one(execution_date, run_id):
raise ValueError("Exactly one of dag_run_id and execution_date must be set")
if execution_date and not timezone.is_localized(execution_date):
raise ValueError(f"Received non-localized date {execution_date}")
task_dags = {task[0].dag if isinstance(task, tuple) else task.dag for task in tasks}
if len(task_dags) > 1:
raise ValueError(f"Received tasks from multiple DAGs: {task_dags}")
dag = next(iter(task_dags))
if dag is None:
raise ValueError("Received tasks with no DAG")
if execution_date:
run_id = dag.get_dagrun(execution_date=execution_date, session=session).run_id
if not run_id:
raise ValueError("Received tasks with no run_id")
dag_run_ids = get_run_ids(dag, run_id, future, past, session=session)
task_id_map_index_list = list(find_task_relatives(tasks, downstream, upstream))
task_ids = [task_id if isinstance(task_id, str) else task_id[0] for task_id in task_id_map_index_list]
confirmed_infos = list(_iter_existing_dag_run_infos(dag, dag_run_ids, session=session))
confirmed_dates = [info.logical_date for info in confirmed_infos]
sub_dag_run_ids = list(
_iter_subdag_run_ids(dag, session, DagRunState(state), task_ids, commit, confirmed_infos),
)
# now look for the task instances that are affected
qry_dag = get_all_dag_task_query(dag, session, state, task_id_map_index_list, dag_run_ids)
if commit:
tis_altered = session.scalars(qry_dag.with_for_update()).all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += session.scalars(qry_sub_dag.with_for_update()).all()
for task_instance in tis_altered:
# The try_number was decremented when setting to up_for_reschedule and deferred.
# Increment it back when changing the state again
if task_instance.state in (TaskInstanceState.DEFERRED, TaskInstanceState.UP_FOR_RESCHEDULE):
task_instance._try_number += 1
task_instance.set_state(state, session=session)
session.flush()
else:
tis_altered = session.scalars(qry_dag).all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += session.scalars(qry_sub_dag).all()
return tis_altered
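# Usage sketch (illustrative, not part of the upstream module; the helper and the "extract"
# task_id are hypothetical): mark one task and everything downstream of it as SUCCESS for a
# single dag run, committing the change to the metadata database.
def _example_mark_task_success(dag: DAG, run_id: str) -> list[TaskInstance]:
    task = dag.get_task("extract")  # assumed task_id, for illustration only
    return set_state(
        tasks=[task],
        run_id=run_id,
        downstream=True,
        state=TaskInstanceState.SUCCESS,
        commit=True,
    )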
def all_subdag_tasks_query(
sub_dag_run_ids: list[str],
session: SASession,
state: TaskInstanceState,
confirmed_dates: Iterable[datetime],
):
"""Get *all* tasks of the sub dags."""
qry_sub_dag = (
select(TaskInstance)
.where(TaskInstance.dag_id.in_(sub_dag_run_ids), TaskInstance.execution_date.in_(confirmed_dates))
.where(or_(TaskInstance.state.is_(None), TaskInstance.state != state))
)
return qry_sub_dag
def get_all_dag_task_query(
dag: DAG,
session: SASession,
state: TaskInstanceState,
task_ids: list[str | tuple[str, int]],
run_ids: Iterable[str],
):
"""Get all tasks of the main dag that will be affected by a state change."""
qry_dag = select(TaskInstance).where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id.in_(run_ids),
TaskInstance.ti_selector_condition(task_ids),
)
qry_dag = qry_dag.where(or_(TaskInstance.state.is_(None), TaskInstance.state != state)).options(
lazyload(TaskInstance.dag_run)
)
return qry_dag
def _iter_subdag_run_ids(
dag: DAG,
session: SASession,
state: DagRunState,
task_ids: list[str],
commit: bool,
confirmed_infos: Iterable[_DagRunInfo],
) -> Iterator[str]:
"""Go through subdag operators and create dag runs.
We only work within the scope of the subdag. A subdag does not propagate to
its parent DAG, but parent propagates to subdags.
"""
dags = [dag]
while dags:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator) or current_task.task_type == "SubDagOperator":
# this works as a kind of integrity check
# it creates missing dag runs for subdag operators,
# maybe this should be moved to dagrun.verify_integrity
if TYPE_CHECKING:
assert current_task.subdag
dag_runs = _create_dagruns(
current_task.subdag,
infos=confirmed_infos,
state=DagRunState.RUNNING,
run_type=DagRunType.BACKFILL_JOB,
)
verify_dagruns(dag_runs, commit, state, session, current_task)
dags.append(current_task.subdag)
yield current_task.subdag.dag_id
def verify_dagruns(
dag_runs: Iterable[DagRun],
commit: bool,
state: DagRunState,
session: SASession,
current_task: Operator,
):
"""Verify integrity of dag_runs.
:param dag_runs: dag runs to verify
:param commit: whether dag runs state should be updated
:param state: state of the dag_run to set if commit is True
:param session: session to use
:param current_task: current task
"""
for dag_run in dag_runs:
dag_run.dag = current_task.subdag
dag_run.verify_integrity()
if commit:
dag_run.state = state
session.merge(dag_run)
def _iter_existing_dag_run_infos(dag: DAG, run_ids: list[str], session: SASession) -> Iterator[_DagRunInfo]:
for dag_run in DagRun.find(dag_id=dag.dag_id, run_id=run_ids, session=session):
dag_run.dag = dag
dag_run.verify_integrity(session=session)
yield _DagRunInfo(dag_run.logical_date, dag.get_run_data_interval(dag_run))
def find_task_relatives(tasks, downstream, upstream):
"""Yield task ids and optionally ancestor and descendant ids."""
for item in tasks:
if isinstance(item, tuple):
task, map_index = item
yield task.task_id, map_index
else:
task = item
yield task.task_id
if downstream:
for relative in task.get_flat_relatives(upstream=False):
yield relative.task_id
if upstream:
for relative in task.get_flat_relatives(upstream=True):
yield relative.task_id
@provide_session
def get_execution_dates(
dag: DAG, execution_date: datetime, future: bool, past: bool, *, session: SASession = NEW_SESSION
) -> list[datetime]:
"""Return DAG execution dates."""
latest_execution_date = dag.get_latest_execution_date(session=session)
if latest_execution_date is None:
raise ValueError(f"Received non-localized date {execution_date}")
execution_date = timezone.coerce_datetime(execution_date)
# determine date range of dag runs and tasks to consider
end_date = latest_execution_date if future else execution_date
if dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if not dag.timetable.can_be_scheduled:
        # If the DAG never schedules, we need to look at the existing DagRuns when the user
        # wants future or past runs.
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
dates = sorted({d.execution_date for d in dag_runs})
elif not dag.timetable.periodic:
dates = [start_date]
else:
dates = [
info.logical_date for info in dag.iter_dagrun_infos_between(start_date, end_date, align=False)
]
return dates
@provide_session
def get_run_ids(dag: DAG, run_id: str, future: bool, past: bool, session: SASession = NEW_SESSION):
"""Return DAG executions' run_ids."""
last_dagrun = dag.get_last_dagrun(include_externally_triggered=True, session=session)
current_dagrun = dag.get_dagrun(run_id=run_id, session=session)
first_dagrun = session.scalar(
select(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date.asc()).limit(1)
)
if last_dagrun is None:
raise ValueError(f"DagRun for {dag.dag_id} not found")
# determine run_id range of dag runs and tasks to consider
end_date = last_dagrun.logical_date if future else current_dagrun.logical_date
start_date = current_dagrun.logical_date if not past else first_dagrun.logical_date
if not dag.timetable.can_be_scheduled:
        # If the DAG never schedules, we need to look at the existing DagRuns when the user
        # wants future or past runs.
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date, session=session)
run_ids = sorted({d.run_id for d in dag_runs})
elif not dag.timetable.periodic:
run_ids = [run_id]
else:
dates = [
info.logical_date for info in dag.iter_dagrun_infos_between(start_date, end_date, align=False)
]
run_ids = [dr.run_id for dr in DagRun.find(dag_id=dag.dag_id, execution_date=dates, session=session)]
return run_ids
def _set_dag_run_state(dag_id: str, run_id: str, state: DagRunState, session: SASession):
"""
Set dag run state in the DB.
:param dag_id: dag_id of target dag run
:param run_id: run id of target dag run
:param state: target state
:param session: database session
"""
dag_run = session.execute(
select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == run_id)
).scalar_one()
dag_run.state = state
if state == DagRunState.RUNNING:
dag_run.start_date = timezone.utcnow()
dag_run.end_date = None
else:
dag_run.end_date = timezone.utcnow()
session.merge(dag_run)
@provide_session
def set_dag_run_state_to_success(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the dag run's state to success.
    Set the state of the dag run for a specific execution date and of its task instances to success.
:param dag: the DAG of which to alter state
    :param execution_date: the execution date from which to start looking (deprecated)
:param run_id: the run_id to start looking from
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or execution_date is invalid
"""
if not exactly_one(execution_date, run_id):
return []
if not dag:
return []
if execution_date:
if not timezone.is_localized(execution_date):
raise ValueError(f"Received non-localized date {execution_date}")
dag_run = dag.get_dagrun(execution_date=execution_date)
if not dag_run:
raise ValueError(f"DagRun with execution_date: {execution_date} not found")
run_id = dag_run.run_id
if not run_id:
raise ValueError(f"Invalid dag_run_id: {run_id}")
# Mark the dag run to success.
if commit:
_set_dag_run_state(dag.dag_id, run_id, DagRunState.SUCCESS, session)
# Mark all task instances of the dag run to success.
for task in dag.tasks:
task.dag = dag
return set_state(
tasks=dag.tasks,
run_id=run_id,
state=TaskInstanceState.SUCCESS,
commit=commit,
session=session,
)
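# Usage sketch (illustrative, not part of the upstream module; the helper is hypothetical):
# mark an entire dag run and its task instances as SUCCESS. With commit=False the call is a
# dry run that only returns the task instances that would be updated.
def _example_mark_run_success(dag: DAG, run_id: str) -> list[TaskInstance]:
    return set_dag_run_state_to_success(dag=dag, run_id=run_id, commit=True)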
@provide_session
def set_dag_run_state_to_failed(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the dag run's state to failed.
    Set the state of the dag run for a specific execution date and of its task instances to failed.
:param dag: the DAG of which to alter state
    :param execution_date: the execution date from which to start looking (deprecated)
:param run_id: the DAG run_id to start looking from
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
    :raises: ValueError if dag or execution_date is invalid
"""
if not exactly_one(execution_date, run_id):
return []
if not dag:
return []
if execution_date:
if not timezone.is_localized(execution_date):
raise ValueError(f"Received non-localized date {execution_date}")
dag_run = dag.get_dagrun(execution_date=execution_date)
if not dag_run:
raise ValueError(f"DagRun with execution_date: {execution_date} not found")
run_id = dag_run.run_id
if not run_id:
raise ValueError(f"Invalid dag_run_id: {run_id}")
# Mark the dag run to failed.
if commit:
_set_dag_run_state(dag.dag_id, run_id, DagRunState.FAILED, session)
running_states = (
TaskInstanceState.RUNNING,
TaskInstanceState.DEFERRED,
TaskInstanceState.UP_FOR_RESCHEDULE,
)
# Mark only RUNNING task instances.
task_ids = [task.task_id for task in dag.tasks]
tis = session.scalars(
select(TaskInstance).where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id == run_id,
TaskInstance.task_id.in_(task_ids),
TaskInstance.state.in_(running_states),
)
)
task_ids_of_running_tis = [task_instance.task_id for task_instance in tis]
tasks = []
for task in dag.tasks:
if task.task_id not in task_ids_of_running_tis:
continue
task.dag = dag
tasks.append(task)
# Mark non-finished tasks as SKIPPED.
tis = session.scalars(
select(TaskInstance).filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id == run_id,
TaskInstance.state.not_in(State.finished),
TaskInstance.state.not_in(running_states),
)
).all()
if commit:
for ti in tis:
ti.set_state(TaskInstanceState.SKIPPED)
return tis + set_state(
tasks=tasks,
run_id=run_id,
state=TaskInstanceState.FAILED,
commit=commit,
session=session,
)
def __set_dag_run_state_to_running_or_queued(
*,
new_state: DagRunState,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession,
) -> list[TaskInstance]:
"""
    Set the dag run identified by execution date or run_id to the given running or queued state.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param run_id: the id of the DagRun
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
"""
res: list[TaskInstance] = []
if not (execution_date is None) ^ (run_id is None):
return res
if not dag:
return res
if execution_date:
if not timezone.is_localized(execution_date):
raise ValueError(f"Received non-localized date {execution_date}")
dag_run = dag.get_dagrun(execution_date=execution_date)
if not dag_run:
raise ValueError(f"DagRun with execution_date: {execution_date} not found")
run_id = dag_run.run_id
if not run_id:
raise ValueError(f"DagRun with run_id: {run_id} not found")
# Mark the dag run to running.
if commit:
_set_dag_run_state(dag.dag_id, run_id, new_state, session)
# To keep the return type consistent with the other similar functions.
return res
@provide_session
def set_dag_run_state_to_running(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
    Set the state of the dag run identified by execution date or run_id to running.
"""
return __set_dag_run_state_to_running_or_queued(
new_state=DagRunState.RUNNING,
dag=dag,
execution_date=execution_date,
run_id=run_id,
commit=commit,
session=session,
)
@provide_session
def set_dag_run_state_to_queued(
*,
dag: DAG,
execution_date: datetime | None = None,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
    Set the state of the dag run identified by execution date or run_id to queued.
"""
return __set_dag_run_state_to_running_or_queued(
new_state=DagRunState.QUEUED,
dag=dag,
execution_date=execution_date,
run_id=run_id,
commit=commit,
session=session,
)
| 21,855 | 34.770867 | 109 |
py
|
airflow
|
airflow-main/airflow/api/common/trigger_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggering DAG runs APIs."""
from __future__ import annotations
import json
from datetime import datetime
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
def _trigger_dag(
dag_id: str,
dag_bag: DagBag,
run_id: str | None = None,
conf: dict | str | None = None,
execution_date: datetime | None = None,
replace_microseconds: bool = True,
) -> list[DagRun | None]:
"""Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
"""
dag = dag_bag.get_dag(dag_id) # prefetch dag if it is stored serialized
if dag is None or dag_id not in dag_bag.dags:
raise DagNotFound(f"Dag id {dag_id} not found")
execution_date = execution_date if execution_date else timezone.utcnow()
if not timezone.is_localized(execution_date):
raise ValueError("The execution_date should be localized")
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if dag.default_args and "start_date" in dag.default_args:
min_dag_start_date = dag.default_args["start_date"]
if min_dag_start_date and execution_date < min_dag_start_date:
raise ValueError(
f"The execution_date [{execution_date.isoformat()}] should be >= start_date "
f"[{min_dag_start_date.isoformat()}] from DAG's default_args"
)
logical_date = timezone.coerce_datetime(execution_date)
data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date)
run_id = run_id or dag.timetable.generate_run_id(
run_type=DagRunType.MANUAL, logical_date=logical_date, data_interval=data_interval
)
dag_run = DagRun.find_duplicate(dag_id=dag_id, execution_date=execution_date, run_id=run_id)
if dag_run:
raise DagRunAlreadyExists(dag_run=dag_run, execution_date=execution_date, run_id=run_id)
run_conf = None
if conf:
run_conf = conf if isinstance(conf, dict) else json.loads(conf)
dag_runs = []
dags_to_run = [dag] + dag.subdags
for _dag in dags_to_run:
dag_run = _dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=DagRunState.QUEUED,
conf=run_conf,
external_trigger=True,
dag_hash=dag_bag.dags_hash.get(dag_id),
data_interval=data_interval,
)
dag_runs.append(dag_run)
return dag_runs
def trigger_dag(
dag_id: str,
run_id: str | None = None,
conf: dict | str | None = None,
execution_date: datetime | None = None,
replace_microseconds: bool = True,
) -> DagRun | None:
"""Triggers execution of DAG specified by dag_id.
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
    :return: the first dag run triggered, even if more than one dag run was triggered, or None if none was
"""
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {dag_id} not found in DagModel")
dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
triggers = _trigger_dag(
dag_id=dag_id,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
execution_date=execution_date,
replace_microseconds=replace_microseconds,
)
return triggers[0] if triggers else None
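# Usage sketch (illustrative, not part of the upstream module; the helper and the conf
# payload are hypothetical): queue a manual run of ``dag_id``. This assumes the DAG has
# already been serialized to the metadata database, which is what DagModel and
# DagBag(read_dags_from_db=True) rely on.
def _example_trigger(dag_id: str) -> DagRun | None:
    return trigger_dag(dag_id=dag_id, conf={"triggered_by": "example"})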
| 4,678 | 34.44697 | 96 |
py
|
airflow
|
airflow-main/airflow/api/common/airflow_health.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.jobs.triggerer_job_runner import TriggererJobRunner
HEALTHY = "healthy"
UNHEALTHY = "unhealthy"
def get_airflow_health() -> dict[str, Any]:
"""Get the health for Airflow metadatabase, scheduler and triggerer."""
metadatabase_status = HEALTHY
latest_scheduler_heartbeat = None
latest_triggerer_heartbeat = None
latest_dag_processor_heartbeat = None
scheduler_status = UNHEALTHY
triggerer_status: str | None = UNHEALTHY
dag_processor_status: str | None = UNHEALTHY
try:
latest_scheduler_job = SchedulerJobRunner.most_recent_job()
if latest_scheduler_job:
latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()
if latest_scheduler_job.is_alive():
scheduler_status = HEALTHY
except Exception:
metadatabase_status = UNHEALTHY
try:
latest_triggerer_job = TriggererJobRunner.most_recent_job()
if latest_triggerer_job:
latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()
if latest_triggerer_job.is_alive():
triggerer_status = HEALTHY
else:
triggerer_status = None
except Exception:
metadatabase_status = UNHEALTHY
try:
latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()
if latest_dag_processor_job:
latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()
if latest_dag_processor_job.is_alive():
dag_processor_status = HEALTHY
else:
dag_processor_status = None
except Exception:
metadatabase_status = UNHEALTHY
airflow_health_status = {
"metadatabase": {"status": metadatabase_status},
"scheduler": {
"status": scheduler_status,
"latest_scheduler_heartbeat": latest_scheduler_heartbeat,
},
"triggerer": {
"status": triggerer_status,
"latest_triggerer_heartbeat": latest_triggerer_heartbeat,
},
"dag_processor": {
"status": dag_processor_status,
"latest_dag_processor_heartbeat": latest_dag_processor_heartbeat,
},
}
return airflow_health_status
| 3,268 | 35.322222 | 98 |
py
|
airflow
|
airflow-main/airflow/api/common/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/api/common/delete_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Delete DAGs APIs."""
from __future__ import annotations
import logging
from sqlalchemy import and_, delete, or_, select
from sqlalchemy.orm import Session
from airflow import models
from airflow.exceptions import AirflowException, DagNotFound
from airflow.models import DagModel, TaskFail
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils.db import get_sqla_model_classes
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import TaskInstanceState
log = logging.getLogger(__name__)
@provide_session
def delete_dag(dag_id: str, keep_records_in_log: bool = True, session: Session = NEW_SESSION) -> int:
"""
Delete a DAG by a dag_id.
:param dag_id: the dag_id of the DAG to delete
    :param keep_records_in_log: whether to keep records of the given dag_id
in the Log table in the backend database (for reasons like auditing).
The default value is True.
:param session: session used
    :return: count of deleted dags
"""
log.info("Deleting DAG: %s", dag_id)
running_tis = session.scalar(
select(models.TaskInstance.state)
.where(models.TaskInstance.dag_id == dag_id)
.where(models.TaskInstance.state == TaskInstanceState.RUNNING)
.limit(1)
)
if running_tis:
raise AirflowException("TaskInstances still running")
dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id).limit(1))
if dag is None:
raise DagNotFound(f"Dag id {dag_id} not found")
# deleting a DAG should also delete all of its subdags
dags_to_delete_query = session.execute(
select(DagModel.dag_id).where(
or_(
DagModel.dag_id == dag_id,
and_(DagModel.dag_id.like(f"{dag_id}.%"), DagModel.is_subdag),
)
)
)
dags_to_delete = [dag_id for dag_id, in dags_to_delete_query]
# Scheduler removes DAGs without files from serialized_dag table every dag_dir_list_interval.
# There may be a lag, so explicitly removes serialized DAG here.
if SerializedDagModel.has_dag(dag_id=dag_id, session=session):
SerializedDagModel.remove_dag(dag_id=dag_id, session=session)
count = 0
for model in get_sqla_model_classes():
if hasattr(model, "dag_id"):
if keep_records_in_log and model.__name__ == "Log":
continue
count += session.execute(
delete(model)
.where(model.dag_id.in_(dags_to_delete))
.execution_options(synchronize_session="fetch")
).rowcount
if dag.is_subdag:
parent_dag_id, task_id = dag_id.rsplit(".", 1)
for model in TaskFail, models.TaskInstance:
count += session.execute(
delete(model).where(model.dag_id == parent_dag_id, model.task_id == task_id)
).rowcount
# Delete entries in Import Errors table for a deleted DAG
# This handles the case when the dag_id is changed in the file
session.execute(
delete(models.ImportError)
.where(models.ImportError.filename == dag.fileloc)
.execution_options(synchronize_session="fetch")
)
return count
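# Usage sketch (illustrative, not part of the upstream module; the helper is hypothetical):
# remove every metadata-database record for a DAG while keeping its audit trail in the Log
# table (the default). DagNotFound is raised for unknown DAGs and AirflowException if any of
# the DAG's task instances are still running.
def _example_delete(dag_id: str) -> int:
    return delete_dag(dag_id, keep_records_in_log=True)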
| 4,012 | 36.858491 | 101 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_task_instance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task instance APIs."""
from __future__ import annotations
from datetime import datetime
from deprecated import deprecated
from airflow.api.common.experimental import check_and_get_dag, check_and_get_dagrun
from airflow.exceptions import TaskInstanceNotFound
from airflow.models import TaskInstance
@deprecated(version="2.2.4", reason="Use DagRun.get_task_instance instead")
def get_task_instance(dag_id: str, task_id: str, execution_date: datetime) -> TaskInstance:
"""Return the task instance identified by the given dag_id, task_id and execution_date."""
dag = check_and_get_dag(dag_id, task_id)
dagrun = check_and_get_dagrun(dag=dag, execution_date=execution_date)
# Get task instance object and check that it exists
task_instance = dagrun.get_task_instance(task_id)
if not task_instance:
error_message = f"Task {task_id} instance for date {execution_date} not found"
raise TaskInstanceNotFound(error_message)
return task_instance
| 1,775 | 40.302326 | 94 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/mark_tasks.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Instance APIs."""
from __future__ import annotations
import warnings
from airflow.api.common.mark_tasks import ( # noqa
_create_dagruns,
set_dag_run_state_to_failed,
set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_state,
)
warnings.warn(
"This module is deprecated. Please use `airflow.api.common.mark_tasks` instead.",
DeprecationWarning,
stacklevel=2,
)
| 1,203 | 32.444444 | 85 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/trigger_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.api.common.trigger_dag import * # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.api.common.trigger_dag` instead.",
DeprecationWarning,
stacklevel=2,
)
| 1,040 | 34.896552 | 86 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_dag_run_state.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DAG run APIs."""
from __future__ import annotations
from datetime import datetime
from deprecated import deprecated
from airflow.api.common.experimental import check_and_get_dag, check_and_get_dagrun
@deprecated(reason="Use DagRun().get_state() instead", version="2.2.4")
def get_dag_run_state(dag_id: str, execution_date: datetime) -> dict[str, str]:
"""Return the Dag Run state identified by the given dag_id and execution_date.
:param dag_id: DAG id
:param execution_date: execution date
:return: Dictionary storing state of the object
"""
dag = check_and_get_dag(dag_id=dag_id)
dagrun = check_and_get_dagrun(dag, execution_date)
return {"state": dagrun.get_state()}
| 1,500 | 35.609756 | 83 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_lineage.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lineage APIs."""
from __future__ import annotations
import collections
import datetime
from typing import Any
from sqlalchemy.orm import Session
from airflow.api.common.experimental import check_and_get_dag, check_and_get_dagrun
from airflow.lineage import PIPELINE_INLETS, PIPELINE_OUTLETS
from airflow.models.xcom import XCom
from airflow.utils.session import NEW_SESSION, provide_session
@provide_session
def get_lineage(
dag_id: str, execution_date: datetime.datetime, *, session: Session = NEW_SESSION
) -> dict[str, dict[str, Any]]:
"""Get lineage information for dag specified."""
dag = check_and_get_dag(dag_id)
dagrun = check_and_get_dagrun(dag, execution_date)
inlets = XCom.get_many(dag_ids=dag_id, run_id=dagrun.run_id, key=PIPELINE_INLETS, session=session)
outlets = XCom.get_many(dag_ids=dag_id, run_id=dagrun.run_id, key=PIPELINE_OUTLETS, session=session)
lineage: dict[str, dict[str, Any]] = collections.defaultdict(dict)
for meta in inlets:
lineage[meta.task_id]["inlets"] = meta.value
for meta in outlets:
lineage[meta.task_id]["outlets"] = meta.value
return {"task_ids": {k: v for k, v in lineage.items()}}
| 1,980 | 37.843137 | 104 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_dag_runs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DAG runs APIs."""
from __future__ import annotations
from typing import Any
from flask import url_for
from airflow.api.common.experimental import check_and_get_dag
from airflow.models import DagRun
from airflow.utils.state import DagRunState
def get_dag_runs(dag_id: str, state: str | None = None) -> list[dict[str, Any]]:
"""
Return a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
"""
check_and_get_dag(dag_id=dag_id)
dag_runs = []
state = DagRunState(state.lower()) if state else None
for run in DagRun.find(dag_id=dag_id, state=state):
dag_runs.append(
{
"id": run.id,
"run_id": run.run_id,
"state": run.state,
"dag_id": run.dag_id,
"execution_date": run.execution_date.isoformat(),
"start_date": ((run.start_date or "") and run.start_date.isoformat()),
"dag_run_url": url_for("Airflow.graph", dag_id=run.dag_id, execution_date=run.execution_date),
}
)
return dag_runs
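# Illustrative usage (not part of the upstream module): a minimal sketch that assumes a DAG
# called "example_dag" is registered and that the call happens inside a Flask application
# context, since each entry includes a graph-view URL built with url_for.
def _example_get_dag_runs_usage() -> None:
    for run in get_dag_runs("example_dag", state="success"):
        print(run["run_id"], run["state"], run["execution_date"])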
| 2,049 | 34.964912 | 110 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_task.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task APIs."""
from __future__ import annotations
from deprecated import deprecated
from airflow.api.common.experimental import check_and_get_dag
from airflow.models import TaskInstance
@deprecated(reason="Use DAG().get_task", version="2.2.4")
def get_task(dag_id: str, task_id: str) -> TaskInstance:
"""Return the task object identified by the given dag_id and task_id."""
dag = check_and_get_dag(dag_id, task_id)
# Return the task.
return dag.get_task(task_id)
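# Illustrative usage (not part of the upstream module): a minimal sketch that assumes a DAG
# called "example_dag" containing a task called "example_task".
def _example_get_task_usage() -> None:
    task = get_task("example_dag", "example_task")
    print(task.task_id, type(task).__name__)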
| 1,273 | 36.470588 | 76 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/get_code.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Get code APIs."""
from __future__ import annotations
from deprecated import deprecated
from airflow.api.common.experimental import check_and_get_dag
from airflow.exceptions import AirflowException, DagCodeNotFound
from airflow.models.dagcode import DagCode
@deprecated(reason="Use DagCode().get_code_by_fileloc() instead", version="2.2.4")
def get_code(dag_id: str) -> str:
"""Return python code of a given dag_id.
:param dag_id: DAG id
:return: code of the DAG
"""
dag = check_and_get_dag(dag_id=dag_id)
try:
return DagCode.get_code_by_fileloc(dag.fileloc)
except (OSError, DagCodeNotFound) as exception:
error_message = f"Error {str(exception)} while reading Dag id {dag_id} Code"
raise AirflowException(error_message, exception)
| 1,580 | 36.642857 | 84 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Experimental APIs."""
from __future__ import annotations
from datetime import datetime
from airflow.exceptions import DagNotFound, DagRunNotFound, TaskNotFound
from airflow.models import DagBag, DagModel, DagRun
def check_and_get_dag(dag_id: str, task_id: str | None = None) -> DagModel:
"""Check DAG existence and in case it is specified that Task exists."""
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {dag_id} not found in DagModel")
dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
dag = dagbag.get_dag(dag_id)
if not dag:
error_message = f"Dag id {dag_id} not found"
raise DagNotFound(error_message)
if task_id and not dag.has_task(task_id):
error_message = f"Task {task_id} not found in dag {dag_id}"
raise TaskNotFound(error_message)
return dag
def check_and_get_dagrun(dag: DagModel, execution_date: datetime) -> DagRun:
"""Get DagRun object and check that it exists."""
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = f"Dag Run for date {execution_date} not found in dag {dag.dag_id}"
raise DagRunNotFound(error_message)
return dagrun
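# Illustrative usage (not part of the upstream module): a minimal sketch combining the two
# helpers; assumes "example_dag" contains "example_task" and has a run for the given date.
def _example_check_helpers_usage() -> None:
    dag = check_and_get_dag("example_dag", task_id="example_task")
    dagrun = check_and_get_dagrun(dag, datetime(2023, 1, 1))
    print(dagrun.run_id, dagrun.state)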
| 2,050 | 39.215686 | 90 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/delete_dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.api.common.delete_dag import * # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.api.common.delete_dag` instead.",
DeprecationWarning,
stacklevel=2,
)
| 1,038 | 34.827586 | 85 |
py
|
airflow
|
airflow-main/airflow/api/common/experimental/pool.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pool APIs."""
from __future__ import annotations
from deprecated import deprecated
from sqlalchemy import select
from sqlalchemy.orm import Session
from airflow.exceptions import AirflowBadRequest, PoolNotFound
from airflow.models import Pool
from airflow.utils.session import NEW_SESSION, provide_session
@deprecated(reason="Use Pool.get_pool() instead", version="2.2.4")
@provide_session
def get_pool(name, session: Session = NEW_SESSION):
"""Get pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
raise PoolNotFound(f"Pool '{name}' doesn't exist")
return pool
@deprecated(reason="Use Pool.get_pools() instead", version="2.2.4")
@provide_session
def get_pools(session: Session = NEW_SESSION):
"""Get all pools."""
return session.query(Pool).all()
@deprecated(reason="Use Pool.create_pool() instead", version="2.2.4")
@provide_session
def create_pool(name, slots, description, session: Session = NEW_SESSION):
"""Create a pool with given parameters."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest(f"Bad value for `slots`: {slots}")
# Get the length of the pool column
pool_name_length = Pool.pool.property.columns[0].type.length
if len(name) > pool_name_length:
raise AirflowBadRequest(f"Pool name can't be more than {pool_name_length} characters")
session.expire_on_commit = False
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
pool = Pool(pool=name, slots=slots, description=description)
session.add(pool)
else:
pool.slots = slots
pool.description = description
session.commit()
return pool
@deprecated(reason="Use Pool.delete_pool() instead", version="2.2.4")
@provide_session
def delete_pool(name, session: Session = NEW_SESSION):
"""Delete pool by a given name."""
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
if name == Pool.DEFAULT_POOL_NAME:
raise AirflowBadRequest(f"{Pool.DEFAULT_POOL_NAME} cannot be deleted")
pool = session.scalar(select(Pool).filter_by(pool=name).limit(1))
if pool is None:
raise PoolNotFound(f"Pool '{name}' doesn't exist")
session.delete(pool)
session.commit()
return pool
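# Illustrative usage (not part of the upstream module): a minimal sketch of the deprecated
# pool helpers; the pool name, slot count and description below are arbitrary examples.
def _example_pool_api_usage() -> None:
    create_pool(name="example_pool", slots=4, description="example pool")
    print(get_pool("example_pool").slots)
    print([existing.pool for existing in get_pools()])
    delete_pool("example_pool")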
| 3,368 | 32.69 | 94 |
py
|
airflow
|
airflow-main/airflow/api/auth/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/basic_auth.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Basic authentication backend."""
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from flask import Response, request
from flask_appbuilder.const import AUTH_LDAP
from flask_login import login_user
from airflow.utils.airflow_flask_app import get_airflow_app
from airflow.www.fab_security.sqla.models import User
CLIENT_AUTH: tuple[str, str] | Any | None = None
def init_app(_):
"""Initialize authentication backend."""
T = TypeVar("T", bound=Callable)
def auth_current_user() -> User | None:
"""Authenticate and set current user if Authorization header exists."""
auth = request.authorization
if auth is None or not auth.username or not auth.password:
return None
ab_security_manager = get_airflow_app().appbuilder.sm
user = None
if ab_security_manager.auth_type == AUTH_LDAP:
user = ab_security_manager.auth_user_ldap(auth.username, auth.password)
if user is None:
user = ab_security_manager.auth_user_db(auth.username, auth.password)
if user is not None:
login_user(user, remember=False)
return user
def requires_authentication(function: T):
"""Decorate functions that require authentication."""
@wraps(function)
def decorated(*args, **kwargs):
if auth_current_user() is not None:
return function(*args, **kwargs)
else:
return Response("Unauthorized", 401, {"WWW-Authenticate": "Basic"})
return cast(T, decorated)
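# Illustrative usage (not part of the upstream module): a minimal sketch of protecting an
# endpoint with this backend; the endpoint below is an arbitrary example, and the backend
# itself is normally selected through Airflow's API auth backend configuration.
@requires_authentication
def _example_protected_endpoint():
    return Response("OK", 200)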
| 2,309 | 32.970588 | 79 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/deny_all.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Authentication backend that denies all requests."""
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from flask import Response
CLIENT_AUTH: tuple[str, str] | Any | None = None
def init_app(_):
"""Initialize authentication."""
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
"""Decorate functions that require authentication."""
@wraps(function)
def decorated(*args, **kwargs):
return Response("Forbidden", 403)
return cast(T, decorated)
| 1,356 | 29.840909 | 62 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/session.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Session authentication backend."""
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from flask import Response
from airflow.configuration import auth_manager
CLIENT_AUTH: tuple[str, str] | Any | None = None
def init_app(_):
"""Initialize authentication backend."""
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
"""Decorate functions that require authentication."""
@wraps(function)
def decorated(*args, **kwargs):
if not auth_manager.is_logged_in():
return Response("Unauthorized", 401, {})
return function(*args, **kwargs)
return cast(T, decorated)
| 1,489 | 30.702128 | 62 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/kerberos_auth.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.utils.airflow_flask_app import get_airflow_app
#
# Copyright (c) 2013, Michael Komitee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Kerberos authentication module"""
import logging
import os
from functools import wraps
from typing import Any, Callable, TypeVar, cast
import kerberos
from flask import Response, _request_ctx_stack as stack, g, make_response, request # type: ignore
from requests_kerberos import HTTPKerberosAuth
from airflow.configuration import conf
from airflow.utils.net import getfqdn
log = logging.getLogger(__name__)
CLIENT_AUTH: tuple[str, str] | Any | None = HTTPKerberosAuth(service="airflow")
class KerberosService:
"""Class to keep information about the Kerberos Service initialized."""
def __init__(self):
self.service_name = None
# Stores currently initialized Kerberos Service
_KERBEROS_SERVICE = KerberosService()
def init_app(app):
"""Initialize application with kerberos."""
hostname = app.config.get("SERVER_NAME")
if not hostname:
hostname = getfqdn()
log.info("Kerberos: hostname %s", hostname)
service = "airflow"
_KERBEROS_SERVICE.service_name = f"{service}@{hostname}"
if "KRB5_KTNAME" not in os.environ:
os.environ["KRB5_KTNAME"] = conf.get("kerberos", "keytab")
try:
log.info("Kerberos init: %s %s", service, hostname)
principal = kerberos.getServerPrincipalDetails(service, hostname)
except kerberos.KrbError as err:
log.warning("Kerberos: %s", err)
else:
log.info("Kerberos API: server is %s", principal)
def _unauthorized():
"""Indicate that authorization is required."""
return Response("Unauthorized", 401, {"WWW-Authenticate": "Negotiate"})
def _forbidden():
return Response("Forbidden", 403)
def _gssapi_authenticate(token):
state = None
ctx = stack.top
try:
return_code, state = kerberos.authGSSServerInit(_KERBEROS_SERVICE.service_name)
if return_code != kerberos.AUTH_GSS_COMPLETE:
return None
return_code = kerberos.authGSSServerStep(state, token)
if return_code == kerberos.AUTH_GSS_COMPLETE:
ctx.kerberos_token = kerberos.authGSSServerResponse(state)
ctx.kerberos_user = kerberos.authGSSServerUserName(state)
return return_code
if return_code == kerberos.AUTH_GSS_CONTINUE:
return kerberos.AUTH_GSS_CONTINUE
return None
except kerberos.GSSError:
return None
finally:
if state:
kerberos.authGSSServerClean(state)
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
"""Decorate functions that require authentication with Kerberos."""
@wraps(function)
def decorated(*args, **kwargs):
header = request.headers.get("Authorization")
if header:
ctx = stack.top
token = "".join(header.split()[1:])
return_code = _gssapi_authenticate(token)
if return_code == kerberos.AUTH_GSS_COMPLETE:
g.user = get_airflow_app().appbuilder.sm.find_user(username=ctx.kerberos_user)
response = function(*args, **kwargs)
response = make_response(response)
if ctx.kerberos_token is not None:
response.headers["WWW-Authenticate"] = " ".join(["negotiate", ctx.kerberos_token])
return response
if return_code != kerberos.AUTH_GSS_CONTINUE:
return _forbidden()
return _unauthorized()
return cast(T, decorated)
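# Illustrative configuration (not part of the upstream module): this backend is typically
# selected through Airflow's API auth backend setting, e.g. in airflow.cfg:
#
#   [api]
#   auth_backends = airflow.api.auth.backend.kerberos_auth
#
# CLIENT_AUTH above is the auth object API clients attach to their HTTP session so that
# they authenticate to the webserver with Kerberos/SPNEGO.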
| 5,664 | 35.548387 | 102 |
py
|
airflow
|
airflow-main/airflow/api/auth/backend/default.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Default authentication backend - everything is allowed."""
from __future__ import annotations
from functools import wraps
from typing import Any, Callable, TypeVar, cast
CLIENT_AUTH: tuple[str, str] | Any | None = None
def init_app(_):
"""Initialize authentication backend."""
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
"""Decorate functions that require authentication."""
@wraps(function)
def decorated(*args, **kwargs):
return function(*args, **kwargs)
return cast(T, decorated)
| 1,342 | 30.97619 | 62 |
py
|
airflow
|
airflow-main/airflow/api/client/json_client.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""JSON API Client."""
from __future__ import annotations
from urllib.parse import urljoin
from airflow.api.client import api_client
class Client(api_client.Client):
"""Json API client implementation.
This client is used to interact with a Json API server and perform various actions
    such as triggering DAG runs, deleting DAGs, interacting with pools, and getting lineage information.
"""
def _request(self, url: str, json=None, method: str = "GET") -> dict:
"""Make a request to the Json API server.
:param url: The URL to send the request to.
:param method: The HTTP method to use (e.g. "GET", "POST", "DELETE").
:param json: A dictionary containing JSON data to send in the request body.
:return: A dictionary containing the JSON response from the server.
:raises OSError: If the server returns an error status.
"""
params = {
"url": url,
}
if json is not None:
params["json"] = json
resp = getattr(self._session, method.lower())(**params)
if resp.is_error:
            # A broad except is justified here because the error payload may come in many forms.
try:
data = resp.json()
except Exception:
data = {}
raise OSError(data.get("error", "Server error"))
return resp.json()
def trigger_dag(self, dag_id, run_id=None, conf=None, execution_date=None, replace_microseconds=True):
"""Trigger a DAG run.
:param dag_id: The ID of the DAG to trigger.
:param run_id: The ID of the DAG run to create. If not provided, a default ID will be generated.
:param conf: A dictionary containing configuration data to pass to the DAG run.
:param execution_date: The execution date for the DAG run, in the format "YYYY-MM-DDTHH:MM:SS".
:param replace_microseconds: Whether to replace microseconds in the execution date with zeros.
:return: A message indicating the status of the DAG run trigger.
"""
endpoint = f"/api/experimental/dags/{dag_id}/dag_runs"
url = urljoin(self._api_base_url, endpoint)
data = {
"run_id": run_id,
"conf": conf,
"execution_date": execution_date,
"replace_microseconds": replace_microseconds,
}
return self._request(url, method="POST", json=data)["message"]
def delete_dag(self, dag_id: str):
"""Delete a DAG.
:param dag_id: The ID of the DAG to delete.
:return: A message indicating the status of the DAG delete operation.
"""
endpoint = f"/api/experimental/dags/{dag_id}/delete_dag"
url = urljoin(self._api_base_url, endpoint)
data = self._request(url, method="DELETE")
return data["message"]
def get_pool(self, name: str):
"""Get information about a specific pool.
:param name: The name of the pool to retrieve information for.
:return: A tuple containing the name of the pool, the number of
slots in the pool, and a description of the pool.
"""
endpoint = f"/api/experimental/pools/{name}"
url = urljoin(self._api_base_url, endpoint)
pool = self._request(url)
return pool["pool"], pool["slots"], pool["description"]
def get_pools(self):
"""Get a list of all pools.
:return: A list of tuples, each containing the name of a pool,
the number of slots in the pool, and a description of the pool.
"""
endpoint = "/api/experimental/pools"
url = urljoin(self._api_base_url, endpoint)
pools = self._request(url)
return [(p["pool"], p["slots"], p["description"]) for p in pools]
def create_pool(self, name: str, slots: int, description: str):
"""Create a new pool.
:param name: The name of the pool to create.
:param slots: The number of slots in the pool.
:param description: A description of the pool.
:return: A tuple containing the name of the pool, the number of slots in the pool,
and a description of the pool.
"""
endpoint = "/api/experimental/pools"
data = {
"name": name,
"slots": slots,
"description": description,
}
response = self._request(urljoin(self._api_base_url, endpoint), method="POST", json=data)
return response["pool"], response["slots"], response["description"]
def delete_pool(self, name: str):
"""Delete a pool.
:param name: The name of the pool to delete.
:return: A tuple containing the name of the pool, the number
of slots in the pool, and a description of the pool.
"""
endpoint = f"/api/experimental/pools/{name}"
url = urljoin(self._api_base_url, endpoint)
pool = self._request(url, method="DELETE")
return pool["pool"], pool["slots"], pool["description"]
def get_lineage(self, dag_id: str, execution_date: str):
"""Get the lineage of a DAG run.
:param dag_id: The ID of the DAG.
:param execution_date: The execution date of the DAG run, in the format "YYYY-MM-DDTHH:MM:SS".
:return: A message indicating the status of the lineage request.
"""
endpoint = f"/api/experimental/lineage/{dag_id}/{execution_date}"
url = urljoin(self._api_base_url, endpoint)
data = self._request(url, method="GET")
return data["message"]
| 6,330 | 40.651316 | 106 |
py
|
airflow
|
airflow-main/airflow/api/client/local_client.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local client API."""
from __future__ import annotations
from airflow.api.client import api_client
from airflow.api.common import delete_dag, trigger_dag
from airflow.api.common.experimental.get_lineage import get_lineage as get_lineage_api
from airflow.exceptions import AirflowBadRequest, PoolNotFound
from airflow.models.pool import Pool
class Client(api_client.Client):
"""Local API client implementation."""
def trigger_dag(
self, dag_id, run_id=None, conf=None, execution_date=None, replace_microseconds=True
) -> dict | None:
dag_run = trigger_dag.trigger_dag(
dag_id=dag_id,
run_id=run_id,
conf=conf,
execution_date=execution_date,
replace_microseconds=replace_microseconds,
)
if dag_run:
return {
"conf": dag_run.conf,
"dag_id": dag_run.dag_id,
"dag_run_id": dag_run.run_id,
"data_interval_start": dag_run.data_interval_start,
"data_interval_end": dag_run.data_interval_end,
"end_date": dag_run.end_date,
"external_trigger": dag_run.external_trigger,
"last_scheduling_decision": dag_run.last_scheduling_decision,
"logical_date": dag_run.logical_date,
"run_type": dag_run.run_type,
"start_date": dag_run.start_date,
"state": dag_run.state,
}
return dag_run
def delete_dag(self, dag_id):
count = delete_dag.delete_dag(dag_id)
return f"Removed {count} record(s)"
def get_pool(self, name):
pool = Pool.get_pool(pool_name=name)
if not pool:
raise PoolNotFound(f"Pool {name} not found")
return pool.pool, pool.slots, pool.description
def get_pools(self):
return [(p.pool, p.slots, p.description) for p in Pool.get_pools()]
def create_pool(self, name, slots, description):
if not (name and name.strip()):
raise AirflowBadRequest("Pool name shouldn't be empty")
pool_name_length = Pool.pool.property.columns[0].type.length
if len(name) > pool_name_length:
raise AirflowBadRequest(f"pool name cannot be more than {pool_name_length} characters")
try:
slots = int(slots)
except ValueError:
raise AirflowBadRequest(f"Bad value for `slots`: {slots}")
pool = Pool.create_or_update_pool(name=name, slots=slots, description=description)
return pool.pool, pool.slots, pool.description
def delete_pool(self, name):
pool = Pool.delete_pool(name=name)
return pool.pool, pool.slots, pool.description
def get_lineage(self, dag_id, execution_date):
lineage = get_lineage_api(dag_id=dag_id, execution_date=execution_date)
return lineage
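# Illustrative usage (not part of the upstream module): a minimal sketch that assumes direct
# access to the metadata database and a registered DAG called "example_dag".
def _example_local_client_usage() -> None:
    client = Client(api_base_url=None, auth=None)
    run_info = client.trigger_dag("example_dag", conf={"param": "value"})
    print(run_info["dag_run_id"] if run_info else "no run created")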
| 3,669 | 39.32967 | 99 |
py
|
airflow
|
airflow-main/airflow/api/client/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""API Client that allows interacting with Airflow API."""
from __future__ import annotations
from importlib import import_module
from airflow import api
from airflow.api.client.api_client import Client
from airflow.configuration import conf
def get_current_api_client() -> Client:
"""Return current API Client based on current Airflow configuration."""
api_module = import_module(conf.get_mandatory_value("cli", "api_client"))
auth_backends = api.load_auth()
session = None
for backend in auth_backends:
session_factory = getattr(backend, "create_client_session", None)
if session_factory:
session = session_factory()
api_client = api_module.Client(
api_base_url=conf.get("cli", "endpoint_url"),
auth=getattr(backend, "CLIENT_AUTH", None),
session=session,
)
return api_client
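# Illustrative usage (not part of the upstream module): a minimal sketch; which concrete
# client comes back depends on the [cli] api_client and endpoint_url settings read above.
def _example_get_current_api_client_usage() -> None:
    client = get_current_api_client()
    print(type(client).__name__)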
| 1,673 | 37.930233 | 77 |
py
|
airflow
|
airflow-main/airflow/api/client/api_client.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Client for all the API clients."""
from __future__ import annotations
import httpx
class Client:
"""Base API client for all API clients."""
def __init__(self, api_base_url, auth=None, session: httpx.Client | None = None):
self._api_base_url = api_base_url
self._session: httpx.Client = session or httpx.Client()
if auth:
self._session.auth = auth
def trigger_dag(self, dag_id, run_id=None, conf=None, execution_date=None, replace_microseconds=True):
"""Create a dag run for the specified dag.
:param dag_id:
:param run_id:
:param conf:
:param execution_date:
:param replace_microseconds:
:return:
"""
raise NotImplementedError()
def delete_dag(self, dag_id):
"""Delete all DB records related to the specified dag.
:param dag_id:
"""
raise NotImplementedError()
def get_pool(self, name):
"""Get pool.
:param name: pool name
"""
raise NotImplementedError()
def get_pools(self):
"""Get all pools."""
raise NotImplementedError()
def create_pool(self, name, slots, description):
"""Create a pool.
:param name: pool name
:param slots: pool slots amount
:param description: pool description
"""
raise NotImplementedError()
def delete_pool(self, name):
"""Delete pool.
:param name: pool name
"""
raise NotImplementedError()
def get_lineage(self, dag_id: str, execution_date: str):
"""
Return the lineage information for the dag on this execution date.
:param dag_id:
:param execution_date:
:return:
"""
raise NotImplementedError()
| 2,590 | 28.443182 | 106 |
py
|
airflow
|
airflow-main/airflow/auth/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/auth/managers/base_auth_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from abc import abstractmethod
from airflow.utils.log.logging_mixin import LoggingMixin
class BaseAuthManager(LoggingMixin):
"""
Class to derive in order to implement concrete auth managers.
    Auth managers are responsible for any user-management-related operation such as login, logout, authz, ...
"""
@abstractmethod
def get_user_name(self) -> str:
"""Return the username associated to the user in session."""
...
@abstractmethod
def is_logged_in(self) -> bool:
"""Return whether the user is logged in."""
...
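# Illustrative subclass (not part of the upstream module): a minimal sketch of a concrete
# auth manager; the hard-coded values are placeholders, not a real authentication scheme.
class _ExampleStaticAuthManager(BaseAuthManager):
    def get_user_name(self) -> str:
        return "example-user"
    def is_logged_in(self) -> bool:
        return True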
| 1,403 | 33.243902 | 109 |
py
|
airflow
|
airflow-main/airflow/auth/managers/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/auth/managers/fab/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/auth/managers/fab/fab_auth_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from flask_login import current_user
from airflow.auth.managers.base_auth_manager import BaseAuthManager
class FabAuthManager(BaseAuthManager):
"""
Flask-AppBuilder auth manager.
This auth manager is responsible for providing a backward compatible user management experience to users.
"""
def get_user_name(self) -> str:
"""
Return the username associated to the user in session.
For backward compatibility reasons, the username in FAB auth manager is the concatenation of the
first name and the last name.
"""
first_name = current_user.first_name or ""
last_name = current_user.last_name or ""
return f"{first_name} {last_name}".strip()
def is_logged_in(self) -> bool:
"""Return whether the user is logged in."""
return current_user and not current_user.is_anonymous
| 1,702 | 36.021739 | 109 |
py
|
airflow
|
airflow-main/airflow/models/base.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from sqlalchemy import MetaData, String
from sqlalchemy.orm import registry
from airflow.configuration import conf
SQL_ALCHEMY_SCHEMA = conf.get("database", "SQL_ALCHEMY_SCHEMA")
# For more information about what the tokens in the naming convention
# below mean, see:
# https://docs.sqlalchemy.org/en/14/core/metadata.html#sqlalchemy.schema.MetaData.params.naming_convention
naming_convention = {
"ix": "idx_%(column_0_N_label)s",
"uq": "%(table_name)s_%(column_0_N_name)s_uq",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "%(table_name)s_%(column_0_name)s_fkey",
"pk": "%(table_name)s_pkey",
}
def _get_schema():
if not SQL_ALCHEMY_SCHEMA or SQL_ALCHEMY_SCHEMA.isspace():
return None
return SQL_ALCHEMY_SCHEMA
metadata = MetaData(schema=_get_schema(), naming_convention=naming_convention)
mapper_registry = registry(metadata=metadata)
Base: Any = mapper_registry.generate_base()
ID_LEN = 250
def get_id_collation_args():
"""Get SQLAlchemy args to use for COLLATION."""
collation = conf.get("database", "sql_engine_collation_for_ids", fallback=None)
if collation:
return {"collation": collation}
else:
# Automatically use utf8mb3_bin collation for mysql
        # This is backwards-compatible. All our IDs are ASCII anyway, so even if
        # we migrate from a previously installed database with a different collation and end up
        # with a mixture of COLLATIONS, it's not a problem whatsoever (and we keep it small enough
        # so that our indexes for MySQL will not exceed the maximum index size).
#
# See https://github.com/apache/airflow/pull/17603#issuecomment-901121618.
#
# We cannot use session/dialect as at this point we are trying to determine the right connection
# parameters, so we use the connection
conn = conf.get("database", "sql_alchemy_conn", fallback="")
if conn.startswith("mysql") or conn.startswith("mariadb"):
return {"collation": "utf8mb3_bin"}
return {}
COLLATION_ARGS = get_id_collation_args()
def StringID(*, length=ID_LEN, **kwargs) -> String:
return String(length=length, **kwargs, **COLLATION_ARGS)
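# Illustrative usage (not part of the upstream module): a minimal sketch of how Base and
# StringID are typically combined in a model definition; the table below is an example,
# not a real Airflow table.
#
#     from sqlalchemy import Column, Integer
#
#     class ExampleModel(Base):
#         __tablename__ = "example_model"
#
#         id = Column(Integer, primary_key=True)
#         dag_id = Column(StringID(), nullable=False)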
| 3,050 | 36.207317 | 106 |
py
|
airflow
|
airflow-main/airflow/models/taskinstance.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections.abc
import contextlib
import hashlib
import logging
import math
import operator
import os
import signal
import warnings
from collections import defaultdict
from datetime import datetime, timedelta
from enum import Enum
from functools import partial
from pathlib import PurePath
from types import TracebackType
from typing import TYPE_CHECKING, Any, Callable, Collection, Generator, Iterable, Tuple
from urllib.parse import quote, urljoin
import dill
import jinja2
import lazy_object_proxy
import pendulum
from jinja2 import TemplateAssertionError, UndefinedError
from sqlalchemy import (
Column,
DateTime,
Float,
ForeignKeyConstraint,
Index,
Integer,
PrimaryKeyConstraint,
String,
Text,
and_,
delete,
false,
func,
inspect,
or_,
text,
update,
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import reconstructor, relationship
from sqlalchemy.orm.attributes import NO_VALUE, set_committed_value
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.elements import BooleanClauseList
from sqlalchemy.sql.expression import ColumnOperators, case
from airflow import settings
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.datasets import Dataset
from airflow.datasets.manager import dataset_manager
from airflow.exceptions import (
AirflowException,
AirflowFailException,
AirflowRescheduleException,
AirflowSensorTimeout,
AirflowSkipException,
AirflowTaskTimeout,
DagRunNotFound,
RemovedInAirflow3Warning,
TaskDeferralError,
TaskDeferred,
UnmappableXComLengthPushed,
UnmappableXComTypePushed,
XComForMappingNotPushed,
)
from airflow.listeners.listener import get_listener_manager
from airflow.models.base import Base, StringID
from airflow.models.dagbag import DagBag
from airflow.models.log import Log
from airflow.models.mappedoperator import MappedOperator
from airflow.models.param import process_params
from airflow.models.taskfail import TaskFail
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.models.taskmap import TaskMap
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.xcom import LazyXComAccess, XCom
from airflow.plugins_manager import integrate_macros_plugins
from airflow.sentry import Sentry
from airflow.stats import Stats
from airflow.templates import SandboxedEnvironment
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import REQUEUEABLE_DEPS, RUNNING_DEPS
from airflow.timetables.base import DataInterval
from airflow.typing_compat import Literal, TypeGuard
from airflow.utils import timezone
from airflow.utils.context import ConnectionAccessor, Context, VariableAccessor, context_merge
from airflow.utils.email import send_email
from airflow.utils.helpers import prune_dict, render_template_to_string
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import qualname
from airflow.utils.net import get_hostname
from airflow.utils.operator_helpers import context_to_airflow_vars
from airflow.utils.platform import getuser
from airflow.utils.retries import run_with_db_retries
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import (
ExecutorConfigType,
ExtendedJSON,
UtcDateTime,
tuple_in_condition,
with_row_locks,
)
from airflow.utils.state import DagRunState, JobState, State, TaskInstanceState
from airflow.utils.task_group import MappedTaskGroup
from airflow.utils.timeout import timeout
from airflow.utils.xcom import XCOM_RETURN_KEY
TR = TaskReschedule
_CURRENT_CONTEXT: list[Context] = []
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from airflow.models.abstractoperator import TaskStateChangeCallback
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG, DagModel
from airflow.models.dagrun import DagRun
from airflow.models.dataset import DatasetEvent
from airflow.models.operator import Operator
from airflow.utils.task_group import TaskGroup
# This is a workaround because mypy doesn't work with hybrid_property
# TODO: remove this hack and move hybrid_property back to main import block
# See https://github.com/python/mypy/issues/4430
hybrid_property = property
else:
from sqlalchemy.ext.hybrid import hybrid_property
PAST_DEPENDS_MET = "past_depends_met"
class TaskReturnCode(Enum):
"""
Enum to signal manner of exit for task run command.
:meta private:
"""
DEFERRED = 100
"""When task exits with deferral to trigger."""
@contextlib.contextmanager
def set_current_context(context: Context) -> Generator[Context, None, None]:
"""
Sets the current execution context to the provided context object.
This method should be called once per Task execution, before calling operator.execute.
"""
_CURRENT_CONTEXT.append(context)
try:
yield context
finally:
expected_state = _CURRENT_CONTEXT.pop()
if expected_state != context:
log.warning(
"Current context is not equal to the state at context stack. Expected=%s, got=%s",
context,
expected_state,
)
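# Illustrative usage (not part of the upstream module): a minimal sketch; the tiny context
# below is a stand-in for the full template context the task runner builds.
def _example_set_current_context_usage() -> None:
    example_context = Context({"ds": "2023-01-01"})
    with set_current_context(example_context) as ctx:
        print(ctx["ds"])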
def stop_all_tasks_in_dag(tis: list[TaskInstance], session: Session, task_id_to_ignore: int):
for ti in tis:
if ti.task_id == task_id_to_ignore or ti.state in (
TaskInstanceState.SUCCESS,
TaskInstanceState.FAILED,
):
continue
if ti.state == TaskInstanceState.RUNNING:
log.info("Forcing task %s to fail", ti.task_id)
ti.error(session)
else:
log.info("Setting task %s to SKIPPED", ti.task_id)
ti.set_state(state=TaskInstanceState.SKIPPED, session=session)
def clear_task_instances(
tis: list[TaskInstance],
session: Session,
activate_dag_runs: None = None,
dag: DAG | None = None,
dag_run_state: DagRunState | Literal[False] = DagRunState.QUEUED,
) -> None:
"""
    Clears a set of task instances, but makes sure the running ones get killed.
    Also sets the DagRun's `state` to QUEUED and `start_date` to the time of execution,
    but only for finished DagRuns (SUCCESS and FAILED). Doesn't clear the DagRun's
    `state` and `start_date` for running DagRuns (QUEUED and RUNNING), because clearing
    the state of an already running DagRun is redundant and clearing `start_date`
    affects the DagRun's duration.
:param tis: a list of task instances
:param session: current session
:param dag_run_state: state to set finished DagRuns to.
If set to False, DagRuns state will not be changed.
:param dag: DAG object
:param activate_dag_runs: Deprecated parameter, do not pass
"""
job_ids = []
# Keys: dag_id -> run_id -> map_indexes -> try_numbers -> task_id
task_id_by_key: dict[str, dict[str, dict[int, dict[int, set[str]]]]] = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
)
dag_bag = DagBag(read_dags_from_db=True)
for ti in tis:
if ti.state == TaskInstanceState.RUNNING:
if ti.job_id:
# If a task is cleared when running, set its state to RESTARTING so that
# the task is terminated and becomes eligible for retry.
ti.state = TaskInstanceState.RESTARTING
job_ids.append(ti.job_id)
else:
ti_dag = dag if dag and dag.dag_id == ti.dag_id else dag_bag.get_dag(ti.dag_id, session=session)
task_id = ti.task_id
if ti_dag and ti_dag.has_task(task_id):
task = ti_dag.get_task(task_id)
ti.refresh_from_task(task)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if the DAG or
# task are not found since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the last attempted try number.
ti.max_tries = max(ti.max_tries, ti.prev_attempted_tries)
ti.state = None
ti.external_executor_id = None
ti.clear_next_method_args()
session.merge(ti)
task_id_by_key[ti.dag_id][ti.run_id][ti.map_index][ti.try_number].add(ti.task_id)
if task_id_by_key:
# Clear all reschedules related to the ti to clear
# This is an optimization for the common case where all tis are for a small number
# of dag_id, run_id, try_number, and map_index. Use a nested dict of dag_id,
# run_id, try_number, map_index, and task_id to construct the where clause in a
# hierarchical manner. This speeds up the delete statement by more than 40x for
# large number of tis (50k+).
conditions = or_(
and_(
TR.dag_id == dag_id,
or_(
and_(
TR.run_id == run_id,
or_(
and_(
TR.map_index == map_index,
or_(
and_(TR.try_number == try_number, TR.task_id.in_(task_ids))
for try_number, task_ids in task_tries.items()
),
)
for map_index, task_tries in map_indexes.items()
),
)
for run_id, map_indexes in run_ids.items()
),
)
for dag_id, run_ids in task_id_by_key.items()
)
delete_qry = TR.__table__.delete().where(conditions)
session.execute(delete_qry)
if job_ids:
from airflow.jobs.job import Job
session.execute(update(Job).where(Job.id.in_(job_ids)).values(state=JobState.RESTARTING))
if activate_dag_runs is not None:
warnings.warn(
"`activate_dag_runs` parameter to clear_task_instances function is deprecated. "
"Please use `dag_run_state`",
RemovedInAirflow3Warning,
stacklevel=2,
)
if not activate_dag_runs:
dag_run_state = False
if dag_run_state is not False and tis:
from airflow.models.dagrun import DagRun # Avoid circular import
run_ids_by_dag_id = defaultdict(set)
for instance in tis:
run_ids_by_dag_id[instance.dag_id].add(instance.run_id)
drs = (
session.query(DagRun)
.filter(
or_(
and_(DagRun.dag_id == dag_id, DagRun.run_id.in_(run_ids))
for dag_id, run_ids in run_ids_by_dag_id.items()
)
)
.all()
)
dag_run_state = DagRunState(dag_run_state) # Validate the state value.
for dr in drs:
if dr.state in State.finished_dr_states:
dr.state = dag_run_state
dr.start_date = timezone.utcnow()
if dag_run_state == DagRunState.QUEUED:
dr.last_scheduling_decision = None
dr.start_date = None
session.flush()
def _is_mappable_value(value: Any) -> TypeGuard[Collection]:
"""Whether a value can be used for task mapping.
We only allow collections with guaranteed ordering, but exclude character
sequences since that's usually not what users would expect to be mappable.
"""
if not isinstance(value, (collections.abc.Sequence, dict)):
return False
if isinstance(value, (bytearray, bytes, str)):
return False
return True
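# For illustration: lists, tuples and dicts pass this check, while strings, bytes,
# bytearrays, and unordered or lazy collections such as sets and generators do not
# (the latter are not Sequences, the former are excluded explicitly).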
def _creator_note(val):
"""Custom creator for the ``note`` association proxy."""
if isinstance(val, str):
return TaskInstanceNote(content=val)
elif isinstance(val, dict):
return TaskInstanceNote(**val)
else:
return TaskInstanceNote(*val)
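# For illustration: assigning ``ti.note = "needs review"`` creates
# TaskInstanceNote(content="needs review"); a dict is expanded into keyword
# arguments, and any other iterable into positional arguments.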
class TaskInstance(Base, LoggingMixin):
"""
Task instances store the state of a task instance. This table is the
authority and single source of truth around what tasks have run and the
state they are in.
    The SQLAlchemy model deliberately has no foreign key to the task or DAG
    model, to allow more control over transactions.
    Database transactions on this table should guard against double triggers and
    any confusion around what task instances are or aren't ready to run, even
    while multiple schedulers may be firing task instances.
A value of -1 in map_index represents any of: a TI without mapped tasks;
a TI with mapped tasks that has yet to be expanded (state=pending);
a TI with mapped tasks that expanded to an empty list (state=skipped).
"""
__tablename__ = "task_instance"
task_id = Column(StringID(), primary_key=True, nullable=False)
dag_id = Column(StringID(), primary_key=True, nullable=False)
run_id = Column(StringID(), primary_key=True, nullable=False)
map_index = Column(Integer, primary_key=True, nullable=False, server_default=text("-1"))
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
state = Column(String(20))
_try_number = Column("try_number", Integer, default=0)
max_tries = Column(Integer, server_default=text("-1"))
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer)
pool = Column(String(256), nullable=False)
pool_slots = Column(Integer, default=1, nullable=False)
queue = Column(String(256))
priority_weight = Column(Integer)
operator = Column(String(1000))
custom_operator_name = Column(String(1000))
queued_dttm = Column(UtcDateTime)
queued_by_job_id = Column(Integer)
pid = Column(Integer)
executor_config = Column(ExecutorConfigType(pickler=dill))
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow)
external_executor_id = Column(StringID())
# The trigger to resume on if we are in state DEFERRED
trigger_id = Column(Integer)
# Optional timeout datetime for the trigger (past this, we'll fail)
trigger_timeout = Column(DateTime)
    # The trigger_timeout should be TIMESTAMP (using UtcDateTime) but, for ease of
    # migration, we are keeping it as DateTime pending a change where an expensive
    # migration is inevitable.
# The method to call next, and any extra arguments to pass to it.
# Usually used when resuming from DEFERRED.
next_method = Column(String(1000))
next_kwargs = Column(MutableDict.as_mutable(ExtendedJSON))
# If adding new fields here then remember to add them to
# refresh_from_db() or they won't display in the UI correctly
__table_args__ = (
Index("ti_dag_state", dag_id, state),
Index("ti_dag_run", dag_id, run_id),
Index("ti_state", state),
Index("ti_state_lkp", dag_id, task_id, run_id, state),
# The below index has been added to improve performance on postgres setups with tens of millions of
# taskinstance rows. Aim is to improve the below query (it can be used to find the last successful
# execution date of a task instance):
# SELECT start_date FROM task_instance WHERE dag_id = 'xx' AND task_id = 'yy' AND state = 'success'
# ORDER BY start_date DESC NULLS LAST LIMIT 1;
# Existing "ti_state_lkp" is not enough for such query when this table has millions of rows, since
# rows have to be fetched in order to retrieve the start_date column. With this index, INDEX ONLY SCAN
# is performed and that query runs within milliseconds.
Index("ti_state_incl_start_date", dag_id, task_id, state, postgresql_include=["start_date"]),
Index("ti_pool", pool, state, priority_weight),
Index("ti_job_id", job_id),
Index("ti_trigger_id", trigger_id),
PrimaryKeyConstraint(
"dag_id", "task_id", "run_id", "map_index", name="task_instance_pkey", mssql_clustered=True
),
ForeignKeyConstraint(
[trigger_id],
["trigger.id"],
name="task_instance_trigger_id_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
[dag_id, run_id],
["dag_run.dag_id", "dag_run.run_id"],
name="task_instance_dag_run_fkey",
ondelete="CASCADE",
),
)
dag_model = relationship(
"DagModel",
primaryjoin="TaskInstance.dag_id == DagModel.dag_id",
foreign_keys=dag_id,
uselist=False,
innerjoin=True,
viewonly=True,
)
trigger = relationship("Trigger", uselist=False, back_populates="task_instance")
triggerer_job = association_proxy("trigger", "triggerer_job")
dag_run = relationship("DagRun", back_populates="task_instances", lazy="joined", innerjoin=True)
rendered_task_instance_fields = relationship("RenderedTaskInstanceFields", lazy="noload", uselist=False)
execution_date = association_proxy("dag_run", "execution_date")
task_instance_note = relationship(
"TaskInstanceNote",
back_populates="task_instance",
uselist=False,
cascade="all, delete, delete-orphan",
)
note = association_proxy("task_instance_note", "content", creator=_creator_note)
task: Operator # Not always set...
is_trigger_log_context: bool = False
"""Indicate to FileTaskHandler that logging context should be set up for trigger logging.
:meta private:
"""
def __init__(
self,
task: Operator,
execution_date: datetime | None = None,
run_id: str | None = None,
state: str | None = None,
map_index: int = -1,
):
super().__init__()
self.dag_id = task.dag_id
self.task_id = task.task_id
self.map_index = map_index
self.refresh_from_task(task)
# init_on_load will config the log
self.init_on_load()
if run_id is None and execution_date is not None:
from airflow.models.dagrun import DagRun # Avoid circular import
warnings.warn(
"Passing an execution_date to `TaskInstance()` is deprecated in favour of passing a run_id",
RemovedInAirflow3Warning,
# Stack level is 4 because SQLA adds some wrappers around the constructor
stacklevel=4,
)
# make sure we have a localized execution_date stored in UTC
if execution_date and not timezone.is_localized(execution_date):
self.log.warning(
"execution date %s has no timezone information. Using default from dag or system",
execution_date,
)
if self.task.has_dag():
if TYPE_CHECKING:
assert self.task.dag
execution_date = timezone.make_aware(execution_date, self.task.dag.timezone)
else:
execution_date = timezone.make_aware(execution_date)
execution_date = timezone.convert_to_utc(execution_date)
with create_session() as session:
run_id = (
session.query(DagRun.run_id)
.filter_by(dag_id=self.dag_id, execution_date=execution_date)
.scalar()
)
if run_id is None:
raise DagRunNotFound(
f"DagRun for {self.dag_id!r} with date {execution_date} not found"
) from None
self.run_id = run_id
self.try_number = 0
self.max_tries = self.task.retries
self.unixname = getuser()
if state:
self.state = state
self.hostname = ""
# Is this TaskInstance being currently running within `airflow tasks run --raw`.
# Not persisted to the database so only valid for the current process
self.raw = False
# can be changed when calling 'run'
self.test_mode = False
@property
def stats_tags(self) -> dict[str, str]:
return prune_dict({"dag_id": self.dag_id, "task_id": self.task_id})
@staticmethod
def insert_mapping(run_id: str, task: Operator, map_index: int) -> dict[str, Any]:
"""Insert mapping.
:meta private:
"""
return {
"dag_id": task.dag_id,
"task_id": task.task_id,
"run_id": run_id,
"_try_number": 0,
"hostname": "",
"unixname": getuser(),
"queue": task.queue,
"pool": task.pool,
"pool_slots": task.pool_slots,
"priority_weight": task.priority_weight_total,
"run_as_user": task.run_as_user,
"max_tries": task.retries,
"executor_config": task.executor_config,
"operator": task.task_type,
"custom_operator_name": getattr(task, "custom_operator_name", None),
"map_index": map_index,
}
@reconstructor
def init_on_load(self) -> None:
"""Initialize the attributes that aren't stored in the DB."""
# correctly config the ti log
self._log = logging.getLogger("airflow.task")
self.test_mode = False # can be changed when calling 'run'
@hybrid_property
def try_number(self):
"""
        Return the try number that this task instance will have when it is actually
        run.
        If the TaskInstance is currently running, this will match the column in the
        database; in all other cases this will be incremented by one.
"""
# This is designed so that task logs end up in the right file.
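        # For example: while the first attempt is RUNNING, _try_number is 1 and this
        # returns 1; once that attempt finishes, it returns 2 (the attempt that would
        # run next), matching the log file that would be written to.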
if self.state == TaskInstanceState.RUNNING:
return self._try_number
return self._try_number + 1
@try_number.setter
def try_number(self, value: int) -> None:
self._try_number = value
@property
def prev_attempted_tries(self) -> int:
"""
Based on this instance's try_number, this will calculate
the number of previously attempted tries, defaulting to 0.
"""
# Expose this for the Task Tries and Gantt graph views.
# Using `try_number` throws off the counts for non-running tasks.
# Also useful in error logging contexts to get
# the try number for the last try that was attempted.
# https://issues.apache.org/jira/browse/AIRFLOW-2143
return self._try_number
@property
def next_try_number(self) -> int:
return self._try_number + 1
@property
def operator_name(self) -> str | None:
"""@property: use a more friendly display name for the operator, if set."""
return self.custom_operator_name or self.operator
def command_as_list(
self,
mark_success=False,
ignore_all_deps=False,
ignore_task_deps=False,
ignore_depends_on_past=False,
wait_for_past_depends_before_skipping=False,
ignore_ti_state=False,
local=False,
pickle_id: int | None = None,
raw=False,
job_id=None,
pool=None,
cfg_path=None,
) -> list[str]:
"""
        Returns a command that can be executed anywhere Airflow is
        installed. This command is part of the message sent to executors by
        the orchestrator.
"""
dag: DAG | DagModel
        # Use the dag if we have it, else fall back to the ORM dag_model, which might not be loaded
if hasattr(self, "task") and hasattr(self.task, "dag") and self.task.dag is not None:
dag = self.task.dag
else:
dag = self.dag_model
should_pass_filepath = not pickle_id and dag
path: PurePath | None = None
if should_pass_filepath:
if dag.is_subdag:
if TYPE_CHECKING:
assert dag.parent_dag is not None
path = dag.parent_dag.relative_fileloc
else:
path = dag.relative_fileloc
if path:
if not path.is_absolute():
path = "DAGS_FOLDER" / path
return TaskInstance.generate_command(
self.dag_id,
self.task_id,
run_id=self.run_id,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
file_path=path,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path,
map_index=self.map_index,
)
@staticmethod
def generate_command(
dag_id: str,
task_id: str,
run_id: str,
mark_success: bool = False,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
local: bool = False,
pickle_id: int | None = None,
file_path: PurePath | str | None = None,
raw: bool = False,
job_id: str | None = None,
pool: str | None = None,
cfg_path: str | None = None,
map_index: int = -1,
) -> list[str]:
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:param task_id: Task ID
:param run_id: The run_id of this task's DagRun
:param mark_success: Whether to mark the task as successful
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:param wait_for_past_depends_before_skipping: Wait for past depends before marking the ti as skipped
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:param ignore_ti_state: Ignore the task instance's previous failure/success
:param local: Whether to run the task locally
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:param cfg_path: the Path to the configuration file
:return: shell command that can be used to run the task instance
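        Example (illustrative names): for a manual run with ``local=True`` and
        ``pool="default_pool"`` this returns roughly
        ``["airflow", "tasks", "run", "my_dag", "my_task",
        "manual__2023-01-01T00:00:00+00:00", "--local", "--pool", "default_pool"]``.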
"""
cmd = ["airflow", "tasks", "run", dag_id, task_id, run_id]
if mark_success:
cmd.extend(["--mark-success"])
if pickle_id:
cmd.extend(["--pickle", str(pickle_id)])
if job_id:
cmd.extend(["--job-id", str(job_id)])
if ignore_all_deps:
cmd.extend(["--ignore-all-dependencies"])
if ignore_task_deps:
cmd.extend(["--ignore-dependencies"])
if ignore_depends_on_past:
cmd.extend(["--depends-on-past", "ignore"])
elif wait_for_past_depends_before_skipping:
cmd.extend(["--depends-on-past", "wait"])
if ignore_ti_state:
cmd.extend(["--force"])
if local:
cmd.extend(["--local"])
if pool:
cmd.extend(["--pool", pool])
if raw:
cmd.extend(["--raw"])
if file_path:
cmd.extend(["--subdir", os.fspath(file_path)])
if cfg_path:
cmd.extend(["--cfg-path", cfg_path])
if map_index != -1:
cmd.extend(["--map-index", str(map_index)])
return cmd
@property
def log_url(self) -> str:
"""Log URL for TaskInstance."""
iso = quote(self.execution_date.isoformat())
base_url = conf.get_mandatory_value("webserver", "BASE_URL")
return urljoin(
base_url,
f"log?execution_date={iso}"
f"&task_id={self.task_id}"
f"&dag_id={self.dag_id}"
f"&map_index={self.map_index}",
)
@property
def mark_success_url(self) -> str:
"""URL to mark TI success."""
base_url = conf.get_mandatory_value("webserver", "BASE_URL")
return urljoin(
base_url,
f"confirm?task_id={self.task_id}"
f"&dag_id={self.dag_id}"
f"&dag_run_id={quote(self.run_id)}"
"&upstream=false"
"&downstream=false"
"&state=success",
)
@provide_session
def current_state(self, session: Session = NEW_SESSION) -> str:
"""
        Get the very latest state from the database. If a session is passed, we
        use it and looking up the state becomes part of the session; otherwise a
        new session is used.
        sqlalchemy.inspect is used here to get the primary keys, ensuring that if
        they change this will not regress.
:param session: SQLAlchemy ORM Session
"""
filters = (col == getattr(self, col.name) for col in inspect(TaskInstance).primary_key)
return session.query(TaskInstance.state).filter(*filters).scalar()
@provide_session
def error(self, session: Session = NEW_SESSION) -> None:
"""
Forces the task instance's state to FAILED in the database.
:param session: SQLAlchemy ORM Session
"""
self.log.error("Recording the task instance as FAILED")
self.state = TaskInstanceState.FAILED
session.merge(self)
session.commit()
@provide_session
def refresh_from_db(self, session: Session = NEW_SESSION, lock_for_update: bool = False) -> None:
"""
Refreshes the task instance from the database based on the primary key.
:param session: SQLAlchemy ORM Session
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
self.log.debug("Refreshing TaskInstance %s from DB", self)
if self in session:
session.refresh(self, TaskInstance.__mapper__.column_attrs.keys())
qry = (
# To avoid joining any relationships, by default select all
# columns, not the object. This also means we get (effectively) a
# namedtuple back, not a TI object
session.query(*TaskInstance.__table__.columns).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == self.run_id,
TaskInstance.map_index == self.map_index,
)
)
if lock_for_update:
for attempt in run_with_db_retries(logger=self.log):
with attempt:
ti: TaskInstance | None = qry.with_for_update().one_or_none()
else:
ti = qry.one_or_none()
if ti:
# Fields ordered per model definition
self.start_date = ti.start_date
self.end_date = ti.end_date
self.duration = ti.duration
self.state = ti.state
# Since we selected columns, not the object, this is the raw value
self.try_number = ti.try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.unixname = ti.unixname
self.job_id = ti.job_id
self.pool = ti.pool
self.pool_slots = ti.pool_slots or 1
self.queue = ti.queue
self.priority_weight = ti.priority_weight
self.operator = ti.operator
self.custom_operator_name = ti.custom_operator_name
self.queued_dttm = ti.queued_dttm
self.queued_by_job_id = ti.queued_by_job_id
self.pid = ti.pid
self.executor_config = ti.executor_config
self.external_executor_id = ti.external_executor_id
self.trigger_id = ti.trigger_id
self.next_method = ti.next_method
self.next_kwargs = ti.next_kwargs
else:
self.state = None
def refresh_from_task(self, task: Operator, pool_override: str | None = None) -> None:
"""
Copy common attributes from the given task.
:param task: The task object to copy from
:param pool_override: Use the pool_override instead of task's pool
"""
self.task = task
self.queue = task.queue
self.pool = pool_override or task.pool
self.pool_slots = task.pool_slots
self.priority_weight = task.priority_weight_total
self.run_as_user = task.run_as_user
# Do not set max_tries to task.retries here because max_tries is a cumulative
# value that needs to be stored in the db.
self.executor_config = task.executor_config
self.operator = task.task_type
self.custom_operator_name = getattr(task, "custom_operator_name", None)
@provide_session
def clear_xcom_data(self, session: Session = NEW_SESSION) -> None:
"""Clear all XCom data from the database for the task instance.
If the task is unmapped, all XComs matching this task ID in the same DAG
run are removed. If the task is mapped, only the one with matching map
index is removed.
:param session: SQLAlchemy ORM Session
"""
self.log.debug("Clearing XCom data")
if self.map_index < 0:
map_index: int | None = None
else:
map_index = self.map_index
XCom.clear(
dag_id=self.dag_id,
task_id=self.task_id,
run_id=self.run_id,
map_index=map_index,
session=session,
)
@property
def key(self) -> TaskInstanceKey:
"""Returns a tuple that identifies the task instance uniquely."""
return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index)
@provide_session
def set_state(self, state: str | None, session: Session = NEW_SESSION) -> bool:
"""
Set TaskInstance state.
:param state: State to set for the TI
:param session: SQLAlchemy ORM Session
:return: Was the state changed
"""
if self.state == state:
return False
current_time = timezone.utcnow()
self.log.debug("Setting task state for %s to %s", self, state)
self.state = state
self.start_date = self.start_date or current_time
if self.state in State.finished or self.state == TaskInstanceState.UP_FOR_RETRY:
self.end_date = self.end_date or current_time
self.duration = (self.end_date - self.start_date).total_seconds()
session.merge(self)
return True
@property
def is_premature(self) -> bool:
"""
        Returns whether a task is in UP_FOR_RETRY state and its retry interval
        has not yet elapsed.
"""
# is the task still in the retry waiting period?
return self.state == TaskInstanceState.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session: Session = NEW_SESSION) -> bool:
"""
Checks whether the immediate dependents of this task instance have succeeded or have been skipped.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
:param session: SQLAlchemy ORM Session
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.run_id == self.run_id,
TaskInstance.state.in_((TaskInstanceState.SKIPPED, TaskInstanceState.SUCCESS)),
)
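        # Indexing the count query executes it; ti[0][0] is the scalar number of
        # downstream task instances already in SKIPPED or SUCCESS.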
count = ti[0][0]
return count == len(task.downstream_task_ids)
@provide_session
def get_previous_dagrun(
self,
state: DagRunState | None = None,
session: Session | None = None,
) -> DagRun | None:
"""The DagRun that ran before this task instance's DagRun.
        :param state: If passed, only take into account instances in this state.
:param session: SQLAlchemy ORM Session.
"""
dag = self.task.dag
if dag is None:
return None
dr = self.get_dagrun(session=session)
dr.dag = dag
# We always ignore schedule in dagrun lookup when `state` is given
# or the DAG is never scheduled. For legacy reasons, when
# `catchup=True`, we use `get_previous_scheduled_dagrun` unless
# `ignore_schedule` is `True`.
ignore_schedule = state is not None or not dag.timetable.can_be_scheduled
if dag.catchup is True and not ignore_schedule:
last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
else:
last_dagrun = dr.get_previous_dagrun(session=session, state=state)
if last_dagrun:
return last_dagrun
return None
@provide_session
def get_previous_ti(
self,
state: DagRunState | None = None,
session: Session = NEW_SESSION,
) -> TaskInstance | None:
"""
The task instance for the task that ran before this task instance.
        :param state: If passed, only take into account instances in this state.
:param session: SQLAlchemy ORM Session
"""
dagrun = self.get_previous_dagrun(state, session=session)
if dagrun is None:
return None
return dagrun.get_task_instance(self.task_id, session=session)
@property
def previous_ti(self) -> TaskInstance | None:
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
""",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_previous_ti()
@property
def previous_ti_success(self) -> TaskInstance | None:
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
""",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_previous_ti(state=DagRunState.SUCCESS)
@provide_session
def get_previous_execution_date(
self,
state: DagRunState | None = None,
session: Session = NEW_SESSION,
) -> pendulum.DateTime | None:
"""
The execution date from property previous_ti_success.
        :param state: If passed, only take into account instances in this state.
:param session: SQLAlchemy ORM Session
"""
self.log.debug("previous_execution_date was called")
prev_ti = self.get_previous_ti(state=state, session=session)
return prev_ti and pendulum.instance(prev_ti.execution_date)
@provide_session
def get_previous_start_date(
self, state: DagRunState | None = None, session: Session = NEW_SESSION
) -> pendulum.DateTime | None:
"""
The start date from property previous_ti_success.
        :param state: If passed, only take into account instances in this state.
:param session: SQLAlchemy ORM Session
"""
self.log.debug("previous_start_date was called")
prev_ti = self.get_previous_ti(state=state, session=session)
# prev_ti may not exist and prev_ti.start_date may be None.
return prev_ti and prev_ti.start_date and pendulum.instance(prev_ti.start_date)
@property
def previous_start_date_success(self) -> pendulum.DateTime | None:
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_start_date` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_start_date` method.
""",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_previous_start_date(state=DagRunState.SUCCESS)
@provide_session
def are_dependencies_met(
self, dep_context: DepContext | None = None, session: Session = NEW_SESSION, verbose: bool = False
) -> bool:
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:param session: database session
        :param verbose: whether to log details of failed dependencies at
            info or debug log level
"""
dep_context = dep_context or DepContext()
failed = False
verbose_aware_logger = self.log.info if verbose else self.log.debug
for dep_status in self.get_failed_dep_statuses(dep_context=dep_context, session=session):
failed = True
verbose_aware_logger(
"Dependencies not met for %s, dependency '%s' FAILED: %s",
self,
dep_status.dep_name,
dep_status.reason,
)
if failed:
return False
verbose_aware_logger("Dependencies all met for dep_context=%s ti=%s", dep_context.description, self)
return True
@provide_session
def get_failed_dep_statuses(self, dep_context: DepContext | None = None, session: Session = NEW_SESSION):
"""Get failed Dependencies."""
dep_context = dep_context or DepContext()
for dep in dep_context.deps | self.task.deps:
for dep_status in dep.get_dep_statuses(self, session, dep_context):
self.log.debug(
"%s dependency '%s' PASSED: %s, %s",
self,
dep_status.dep_name,
dep_status.passed,
dep_status.reason,
)
if not dep_status.passed:
yield dep_status
def __repr__(self) -> str:
prefix = f"<TaskInstance: {self.dag_id}.{self.task_id} {self.run_id} "
if self.map_index != -1:
prefix += f"map_index={self.map_index} "
return prefix + f"[{self.state}]>"
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
from airflow.models.abstractoperator import MAX_RETRY_DELAY
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
# If the min_backoff calculation is below 1, it will be converted to 0 via int. Thus,
# we must round up prior to converting to an int, otherwise a divide by zero error
# will occur in the modded_hash calculation.
min_backoff = int(math.ceil(delay.total_seconds() * (2 ** (self.try_number - 2))))
# In the case when delay.total_seconds() is 0, min_backoff will not be rounded up to 1.
# To address this, we impose a lower bound of 1 on min_backoff. This effectively makes
# the ceiling function unnecessary, but the ceiling function was retained to avoid
# introducing a breaking change.
if min_backoff < 1:
min_backoff = 1
# deterministic per task instance
ti_hash = int(
hashlib.sha1(
f"{self.dag_id}#{self.task_id}#{self.execution_date}#{self.try_number}".encode()
).hexdigest(),
16,
)
            # Deterministic jitter: modded_hash falls in [min_backoff, 2 * min_backoff - 1]
modded_hash = min_backoff + ti_hash % min_backoff
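            # Worked example (illustrative): with retry_delay=300s and try_number=3,
            # min_backoff = ceil(300 * 2**(3 - 2)) = 600, so the uncapped delay lands
            # somewhere in [600, 1199] seconds for this task instance.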
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail with "OverflowError".
delay_backoff_in_seconds = min(modded_hash, MAX_RETRY_DELAY)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
def ready_for_retry(self) -> bool:
"""
        Checks whether the task instance is in the right state and timeframe
to be retried.
"""
return self.state == TaskInstanceState.UP_FOR_RETRY and self.next_retry_datetime() < timezone.utcnow()
@provide_session
def get_dagrun(self, session: Session = NEW_SESSION) -> DagRun:
"""
Returns the DagRun for this TaskInstance.
:param session: SQLAlchemy ORM Session
:return: DagRun
"""
info = inspect(self)
if info.attrs.dag_run.loaded_value is not NO_VALUE:
if hasattr(self, "task"):
self.dag_run.dag = self.task.dag
return self.dag_run
from airflow.models.dagrun import DagRun # Avoid circular import
dr = session.query(DagRun).filter(DagRun.dag_id == self.dag_id, DagRun.run_id == self.run_id).one()
if hasattr(self, "task"):
dr.dag = self.task.dag
# Record it in the instance for next time. This means that `self.execution_date` will work correctly
set_committed_value(self, "dag_run", dr)
return dr
@provide_session
def check_and_change_state_before_execution(
self,
verbose: bool = True,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
job_id: str | None = None,
pool: str | None = None,
external_executor_id: str | None = None,
session: Session = NEW_SESSION,
) -> bool:
"""
Checks dependencies and then sets state to RUNNING if they are met. Returns
True if and only if state is set to RUNNING, which implies that task should be
executed, in preparation for _run_raw_task.
:param verbose: whether to turn on more verbose logging
:param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
:param ignore_depends_on_past: Ignore depends_on_past DAG attribute
:param wait_for_past_depends_before_skipping: Wait for past depends before mark the ti as skipped
:param ignore_task_deps: Don't check the dependencies of this TaskInstance's task
:param ignore_ti_state: Disregards previous task instance state
:param mark_success: Don't run the task, mark its state as success
:param test_mode: Doesn't record success or failure in the DB
:param job_id: Job (BackfillJob / LocalTaskJob / SchedulerJob) ID
:param pool: specifies the pool to use to run the task instance
:param external_executor_id: The identifier of the celery executor
:param session: SQLAlchemy ORM Session
:return: whether the state was changed to running or not
"""
task = self.task
self.refresh_from_task(task, pool_override=pool)
self.test_mode = test_mode
self.refresh_from_db(session=session, lock_for_update=True)
self.job_id = job_id
self.hostname = get_hostname()
self.pid = None
if not ignore_all_deps and not ignore_ti_state and self.state == TaskInstanceState.SUCCESS:
Stats.incr("previously_succeeded", tags=self.stats_tags)
if not mark_success:
            # First evaluate the non-runnable and non-requeueable deps; if any of them
            # are not met, bail out here without changing the task state.
non_requeueable_dep_context = DepContext(
deps=RUNNING_DEPS - REQUEUEABLE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_ti_state=ignore_ti_state,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
description="non-requeueable deps",
)
if not self.are_dependencies_met(
dep_context=non_requeueable_dep_context, session=session, verbose=True
):
session.commit()
return False
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Attempt 1 instead of
# Attempt 0 for the first attempt).
# Set the task start date. In case it was re-scheduled use the initial
# start date that is recorded in task_reschedule table
# If the task continues after being deferred (next_method is set), use the original start_date
self.start_date = self.start_date if self.next_method else timezone.utcnow()
if self.state == TaskInstanceState.UP_FOR_RESCHEDULE:
task_reschedule: TR = TR.query_for_task_instance(self, session=session).first()
if task_reschedule:
self.start_date = task_reschedule.start_date
# Secondly we find non-runnable but requeueable tis. We reset its state.
# This is because we might have hit concurrency limits,
# e.g. because of backfilling.
dep_context = DepContext(
deps=REQUEUEABLE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
description="requeueable deps",
)
if not self.are_dependencies_met(dep_context=dep_context, session=session, verbose=True):
self.state = None
self.log.warning(
"Rescheduling due to concurrency limits reached "
"at task runtime. Attempt %s of "
"%s. State set to NONE.",
self.try_number,
self.max_tries + 1,
)
self.queued_dttm = timezone.utcnow()
session.merge(self)
session.commit()
return False
if self.next_kwargs is not None:
self.log.info("Resuming after deferral")
else:
self.log.info("Starting attempt %s of %s", self.try_number, self.max_tries + 1)
self._try_number += 1
if not test_mode:
session.add(Log(TaskInstanceState.RUNNING.value, self))
self.state = TaskInstanceState.RUNNING
self.emit_state_change_metric(TaskInstanceState.RUNNING)
self.external_executor_id = external_executor_id
self.end_date = None
if not test_mode:
session.merge(self).task = task
session.commit()
# Closing all pooled connections to prevent
# "max number of connections reached"
settings.engine.dispose() # type: ignore
if verbose:
if mark_success:
self.log.info("Marking success for %s on %s", self.task, self.execution_date)
else:
self.log.info("Executing %s on %s", self.task, self.execution_date)
return True
def _date_or_empty(self, attr: str) -> str:
result: datetime | None = getattr(self, attr, None)
return result.strftime("%Y%m%dT%H%M%S") if result else ""
def _log_state(self, lead_msg: str = "") -> None:
params = [
lead_msg,
str(self.state).upper(),
self.dag_id,
self.task_id,
]
message = "%sMarking task as %s. dag_id=%s, task_id=%s, "
if self.map_index >= 0:
params.append(self.map_index)
message += "map_index=%d, "
self.log.info(
message + "execution_date=%s, start_date=%s, end_date=%s",
*params,
self._date_or_empty("execution_date"),
self._date_or_empty("start_date"),
self._date_or_empty("end_date"),
)
def emit_state_change_metric(self, new_state: TaskInstanceState) -> None:
"""
Sends a time metric representing how much time a given state transition took.
        The previous state and metric name are deduced from the state the task was put in.
:param new_state: The state that has just been set for this task.
We do not use `self.state`, because sometimes the state is updated directly in the DB and not in
the local TaskInstance object.
Supported states: QUEUED and RUNNING
"""
if self.end_date:
# if the task has an end date, it means that this is not its first round.
# we send the state transition time metric only on the first try, otherwise it gets more complex.
return
# switch on state and deduce which metric to send
if new_state == TaskInstanceState.RUNNING:
metric_name = "queued_duration"
if self.queued_dttm is None:
# this should not really happen except in tests or rare cases,
# but we don't want to create errors just for a metric, so we just skip it
self.log.warning(
"cannot record %s for task %s because previous state change time has not been saved",
metric_name,
self.task_id,
)
return
timing = (timezone.utcnow() - self.queued_dttm).total_seconds()
elif new_state == TaskInstanceState.QUEUED:
metric_name = "scheduled_duration"
if self.start_date is None:
# same comment as above
self.log.warning(
"cannot record %s for task %s because previous state change time has not been saved",
metric_name,
self.task_id,
)
return
timing = (timezone.utcnow() - self.start_date).total_seconds()
else:
            raise NotImplementedError(f"no metric emission setup for state {new_state}")
# send metric twice, once (legacy) with tags in the name and once with tags as tags
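        # e.g. a QUEUED -> RUNNING transition for dag_id="my_dag", task_id="my_task"
        # emits "dag.my_dag.my_task.queued_duration" and the tagged "task.queued_duration".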
Stats.timing(f"dag.{self.dag_id}.{self.task_id}.{metric_name}", timing)
Stats.timing(f"task.{metric_name}", timing, tags={"task_id": self.task_id, "dag_id": self.dag_id})
    # Unset next_method and next_kwargs so that any retries don't re-use them.
def clear_next_method_args(self) -> None:
self.log.debug("Clearing next_method and next_kwargs.")
self.next_method = None
self.next_kwargs = None
@provide_session
@Sentry.enrich_errors
def _run_raw_task(
self,
mark_success: bool = False,
test_mode: bool = False,
job_id: str | None = None,
pool: str | None = None,
session: Session = NEW_SESSION,
) -> TaskReturnCode | None:
"""
Immediately runs the task (without checking or changing db state
before execution) and then sets the appropriate final state after
completion and runs any post-execute callbacks. Meant to be called
only after another function changes the state to running.
:param mark_success: Don't run the task, mark its state as success
:param test_mode: Doesn't record success or failure in the DB
:param pool: specifies the pool to use to run the task instance
:param session: SQLAlchemy ORM Session
"""
self.test_mode = test_mode
self.refresh_from_task(self.task, pool_override=pool)
self.refresh_from_db(session=session)
self.job_id = job_id
self.hostname = get_hostname()
self.pid = os.getpid()
if not test_mode:
session.merge(self)
session.commit()
actual_start_date = timezone.utcnow()
Stats.incr(f"ti.start.{self.task.dag_id}.{self.task.task_id}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("ti.start", tags=self.stats_tags)
# Initialize final state counters at zero
for state in State.task_states:
Stats.incr(
f"ti.finish.{self.task.dag_id}.{self.task.task_id}.{state}",
count=0,
tags=self.stats_tags,
)
# Same metric with tagging
Stats.incr(
"ti.finish",
count=0,
tags={**self.stats_tags, "state": str(state)},
)
self.task = self.task.prepare_for_execution()
context = self.get_template_context(ignore_param_exceptions=False)
        # We lose the previous state because it is changed in another process by LocalTaskJob.
# We could probably pass it through here though...
get_listener_manager().hook.on_task_instance_running(
previous_state=TaskInstanceState.QUEUED, task_instance=self, session=session
)
try:
if not mark_success:
self._execute_task_with_callbacks(context, test_mode)
if not test_mode:
self.refresh_from_db(lock_for_update=True, session=session)
self.state = TaskInstanceState.SUCCESS
except TaskDeferred as defer:
# The task has signalled it wants to defer execution based on
# a trigger.
self._defer_task(defer=defer, session=session)
self.log.info(
"Pausing task as DEFERRED. dag_id=%s, task_id=%s, execution_date=%s, start_date=%s",
self.dag_id,
self.task_id,
self._date_or_empty("execution_date"),
self._date_or_empty("start_date"),
)
if not test_mode:
session.add(Log(self.state, self))
session.merge(self)
session.commit()
return TaskReturnCode.DEFERRED
except AirflowSkipException as e:
# Recording SKIP
# log only if exception has any arguments to prevent log flooding
if e.args:
self.log.info(e)
if not test_mode:
self.refresh_from_db(lock_for_update=True, session=session)
self.state = TaskInstanceState.SKIPPED
except AirflowRescheduleException as reschedule_exception:
self._handle_reschedule(actual_start_date, reschedule_exception, test_mode, session=session)
session.commit()
return None
except (AirflowFailException, AirflowSensorTimeout) as e:
# If AirflowFailException is raised, task should not retry.
# If a sensor in reschedule mode reaches timeout, task should not retry.
self.handle_failure(e, test_mode, context, force_fail=True, session=session)
session.commit()
raise
except AirflowException as e:
if not test_mode:
self.refresh_from_db(lock_for_update=True, session=session)
# for case when task is marked as success/failed externally
# or dagrun timed out and task is marked as skipped
# current behavior doesn't hit the callbacks
if self.state in State.finished:
self.clear_next_method_args()
session.merge(self)
session.commit()
return None
else:
self.handle_failure(e, test_mode, context, session=session)
session.commit()
raise
except (Exception, KeyboardInterrupt) as e:
self.handle_failure(e, test_mode, context, session=session)
session.commit()
raise
finally:
Stats.incr(f"ti.finish.{self.dag_id}.{self.task_id}.{self.state}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("ti.finish", tags={**self.stats_tags, "state": str(self.state)})
# Recording SKIPPED or SUCCESS
self.clear_next_method_args()
self.end_date = timezone.utcnow()
self._log_state()
self.set_duration()
# run on_success_callback before db committing
            # otherwise, the LocalTaskJob sees the state change to `success` while
            # the task_runner is still running, and then treats the state as having been set externally!
self._run_finished_callback(self.task.on_success_callback, context, "on_success")
if not test_mode:
session.add(Log(self.state, self))
session.merge(self).task = self.task
if self.state == TaskInstanceState.SUCCESS:
self._register_dataset_changes(session=session)
get_listener_manager().hook.on_task_instance_success(
previous_state=TaskInstanceState.RUNNING, task_instance=self, session=session
)
session.commit()
return None
def _register_dataset_changes(self, *, session: Session) -> None:
for obj in self.task.outlets or []:
self.log.debug("outlet obj %s", obj)
# Lineage can have other types of objects besides datasets
if isinstance(obj, Dataset):
dataset_manager.register_dataset_change(
task_instance=self,
dataset=obj,
session=session,
)
def _execute_task_with_callbacks(self, context, test_mode=False):
"""Prepare Task for Execution."""
from airflow.models.renderedtifields import RenderedTaskInstanceFields
parent_pid = os.getpid()
def signal_handler(signum, frame):
pid = os.getpid()
# If a task forks during execution (from DAG code) for whatever
# reason, we want to make sure that we react to the signal only in
# the process that we've spawned ourselves (referred to here as the
# parent process).
if pid != parent_pid:
os._exit(1)
return
self.log.error("Received SIGTERM. Terminating subprocesses.")
self.task.on_kill()
raise AirflowException("Task received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
# Don't clear Xcom until the task is certain to execute, and check if we are resuming from deferral.
if not self.next_method:
self.clear_xcom_data()
with Stats.timer(f"dag.{self.task.dag_id}.{self.task.task_id}.duration", tags=self.stats_tags):
# Set the validated/merged params on the task object.
self.task.params = context["params"]
task_orig = self.render_templates(context=context)
if not test_mode:
rtif = RenderedTaskInstanceFields(ti=self, render_templates=False)
RenderedTaskInstanceFields.write(rtif)
RenderedTaskInstanceFields.delete_old_records(self.task_id, self.dag_id)
# Export context to make it available for operators to use.
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
os.environ.update(airflow_context_vars)
# Log context only for the default execution method, the assumption
# being that otherwise we're resuming a deferred task (in which
# case there's no need to log these again).
if not self.next_method:
self.log.info(
"Exporting env vars: %s",
" ".join(f"{k}={v!r}" for k, v in airflow_context_vars.items()),
)
# Run pre_execute callback
self.task.pre_execute(context=context)
# Run on_execute callback
self._run_execute_callback(context, self.task)
# Execute the task
with set_current_context(context):
result = self._execute_task(context, task_orig)
# Run post_execute callback
self.task.post_execute(context=context, result=result)
Stats.incr(f"operator_successes_{self.task.task_type}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("operator_successes", tags={**self.stats_tags, "task_type": self.task.task_type})
Stats.incr("ti_successes", tags=self.stats_tags)
def _run_finished_callback(
self,
callbacks: None | TaskStateChangeCallback | list[TaskStateChangeCallback],
context: Context,
callback_type: str,
) -> None:
"""Run callback after task finishes."""
if callbacks:
callbacks = callbacks if isinstance(callbacks, list) else [callbacks]
for callback in callbacks:
try:
callback(context)
except Exception:
callback_name = qualname(callback).split(".")[-1]
self.log.exception(
f"Error when executing {callback_name} callback" # type: ignore[attr-defined]
)
def _execute_task(self, context, task_orig):
"""Executes Task (optionally with a Timeout) and pushes Xcom results."""
task_to_execute = self.task
# If the task has been deferred and is being executed due to a trigger,
# then we need to pick the right method to come back to, otherwise
# we go for the default execute
if self.next_method:
# __fail__ is a special signal value for next_method that indicates
# this task was scheduled specifically to fail.
if self.next_method == "__fail__":
next_kwargs = self.next_kwargs or {}
traceback = self.next_kwargs.get("traceback")
if traceback is not None:
self.log.error("Trigger failed:\n%s", "\n".join(traceback))
raise TaskDeferralError(next_kwargs.get("error", "Unknown"))
# Grab the callable off the Operator/Task and add in any kwargs
execute_callable = getattr(task_to_execute, self.next_method)
if self.next_kwargs:
execute_callable = partial(execute_callable, **self.next_kwargs)
else:
execute_callable = task_to_execute.execute
# If a timeout is specified for the task, make it fail
# if it goes beyond
if task_to_execute.execution_timeout:
# If we are coming in with a next_method (i.e. from a deferral),
# calculate the timeout from our start_date.
if self.next_method:
timeout_seconds = (
task_to_execute.execution_timeout - (timezone.utcnow() - self.start_date)
).total_seconds()
else:
timeout_seconds = task_to_execute.execution_timeout.total_seconds()
try:
# It's possible we're already timed out, so fast-fail if true
if timeout_seconds <= 0:
raise AirflowTaskTimeout()
# Run task in timeout wrapper
with timeout(timeout_seconds):
result = execute_callable(context=context)
except AirflowTaskTimeout:
task_to_execute.on_kill()
raise
else:
result = execute_callable(context=context)
with create_session() as session:
if task_to_execute.do_xcom_push:
xcom_value = result
else:
xcom_value = None
if xcom_value is not None: # If the task returns a result, push an XCom containing it.
self.xcom_push(key=XCOM_RETURN_KEY, value=xcom_value, session=session)
self._record_task_map_for_downstreams(task_orig, xcom_value, session=session)
return result
@provide_session
def _defer_task(self, session: Session, defer: TaskDeferred) -> None:
"""
Marks the task as deferred and sets up the trigger that is needed
to resume it.
"""
from airflow.models.trigger import Trigger
# First, make the trigger entry
trigger_row = Trigger.from_object(defer.trigger)
session.add(trigger_row)
session.flush()
# Then, update ourselves so it matches the deferral request
# Keep an eye on the logic in `check_and_change_state_before_execution()`
# depending on self.next_method semantics
self.state = TaskInstanceState.DEFERRED
self.trigger_id = trigger_row.id
self.next_method = defer.method_name
self.next_kwargs = defer.kwargs or {}
# Decrement try number so the next one is the same try
self._try_number -= 1
# Calculate timeout too if it was passed
if defer.timeout is not None:
self.trigger_timeout = timezone.utcnow() + defer.timeout
else:
self.trigger_timeout = None
# If an execution_timeout is set, set the timeout to the minimum of
# it and the trigger timeout
execution_timeout = self.task.execution_timeout
if execution_timeout:
if self.trigger_timeout:
self.trigger_timeout = min(self.start_date + execution_timeout, self.trigger_timeout)
else:
self.trigger_timeout = self.start_date + execution_timeout
def _run_execute_callback(self, context: Context, task: Operator) -> None:
"""Functions that need to be run before a Task is executed."""
callbacks = task.on_execute_callback
if callbacks:
callbacks = callbacks if isinstance(callbacks, list) else [callbacks]
for callback in callbacks:
try:
callback(context)
except Exception:
self.log.exception("Failed when executing execute callback")
@provide_session
def run(
self,
verbose: bool = True,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
wait_for_past_depends_before_skipping: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
job_id: str | None = None,
pool: str | None = None,
session: Session = NEW_SESSION,
) -> None:
"""Run TaskInstance."""
res = self.check_and_change_state_before_execution(
verbose=verbose,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session,
)
if not res:
return
self._run_raw_task(
mark_success=mark_success, test_mode=test_mode, job_id=job_id, pool=pool, session=session
)
def dry_run(self) -> None:
"""Only Renders Templates for the TI."""
from airflow.models.baseoperator import BaseOperator
self.task = self.task.prepare_for_execution()
self.render_templates()
if TYPE_CHECKING:
assert isinstance(self.task, BaseOperator)
self.task.dry_run()
@provide_session
def _handle_reschedule(
self, actual_start_date, reschedule_exception, test_mode=False, session=NEW_SESSION
):
# Don't record reschedule request in test mode
if test_mode:
return
from airflow.models.dagrun import DagRun # Avoid circular import
self.refresh_from_db(session)
self.end_date = timezone.utcnow()
self.set_duration()
# Lock DAG run to be sure not to get into a deadlock situation when trying to insert
# TaskReschedule which apparently also creates lock on corresponding DagRun entity
with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
run_id=self.run_id,
),
session=session,
).one()
# Log reschedule request
session.add(
TaskReschedule(
self.task,
self.run_id,
self._try_number,
actual_start_date,
self.end_date,
reschedule_exception.reschedule_date,
self.map_index,
)
)
# set state
self.state = TaskInstanceState.UP_FOR_RESCHEDULE
# Decrement try_number so subsequent runs will use the same try number and write
# to same log file.
self._try_number -= 1
self.clear_next_method_args()
session.merge(self)
session.commit()
self.log.info("Rescheduling task, marking task as UP_FOR_RESCHEDULE")
@staticmethod
def get_truncated_error_traceback(error: BaseException, truncate_to: Callable) -> TracebackType | None:
"""
Truncates the traceback of an exception to the first frame called from within a given function.
:param error: exception to get traceback from
:param truncate_to: Function to truncate TB to. Must have a ``__code__`` attribute
:meta private:
"""
tb = error.__traceback__
code = truncate_to.__func__.__code__ # type: ignore[attr-defined]
while tb is not None:
if tb.tb_frame.f_code is code:
return tb.tb_next
tb = tb.tb_next
return tb or error.__traceback__
@provide_session
def handle_failure(
self,
error: None | str | Exception | KeyboardInterrupt,
test_mode: bool | None = None,
context: Context | None = None,
force_fail: bool = False,
session: Session = NEW_SESSION,
) -> None:
"""Handle Failure for the TaskInstance."""
if test_mode is None:
test_mode = self.test_mode
get_listener_manager().hook.on_task_instance_failed(
previous_state=TaskInstanceState.RUNNING, task_instance=self, session=session
)
if error:
if isinstance(error, BaseException):
tb = self.get_truncated_error_traceback(error, truncate_to=self._execute_task)
self.log.error("Task failed with exception", exc_info=(type(error), error, tb))
else:
self.log.error("%s", error)
if not test_mode:
self.refresh_from_db(session)
self.end_date = timezone.utcnow()
self.set_duration()
Stats.incr(f"operator_failures_{self.operator}", tags=self.stats_tags)
# Same metric with tagging
Stats.incr("operator_failures", tags={**self.stats_tags, "operator": self.operator})
Stats.incr("ti_failures", tags=self.stats_tags)
if not test_mode:
session.add(Log(TaskInstanceState.FAILED.value, self))
# Log failure duration
session.add(TaskFail(ti=self))
self.clear_next_method_args()
# In extreme cases (zombie in case of dag with parse error) we might _not_ have a Task.
if context is None and getattr(self, "task", None):
context = self.get_template_context(session)
if context is not None:
context["exception"] = error
# Set state correctly and figure out how to log it and decide whether
# to email
# Note, callback invocation needs to be handled by caller of
# _run_raw_task to avoid race conditions which could lead to duplicate
# invocations or miss invocation.
# Since this function is called only when the TaskInstance state is running,
# try_number contains the current try_number (not the next). We
# only mark task instance as FAILED if the next task instance
# try_number exceeds the max_tries ... or if force_fail is truthy
task: BaseOperator | None = None
try:
if getattr(self, "task", None) and context:
task = self.task.unmap((context, session))
except Exception:
self.log.error("Unable to unmap task to determine if we need to send an alert email")
if force_fail or not self.is_eligible_to_retry():
self.state = TaskInstanceState.FAILED
email_for_state = operator.attrgetter("email_on_failure")
callbacks = task.on_failure_callback if task else None
callback_type = "on_failure"
if task and task.dag and task.dag.fail_stop:
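            # The DAG is configured with fail_stop: a hard failure of this task also
            # stops the remaining task instances of the same DAG run.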
tis = self.get_dagrun(session).get_task_instances()
stop_all_tasks_in_dag(tis, session, self.task_id)
else:
if self.state == TaskInstanceState.QUEUED:
                # We increase the try_number so as to fail the task if it fails to start after some time
self._try_number += 1
self.state = TaskInstanceState.UP_FOR_RETRY
email_for_state = operator.attrgetter("email_on_retry")
callbacks = task.on_retry_callback if task else None
callback_type = "on_retry"
self._log_state("Immediate failure requested. " if force_fail else "")
if task and email_for_state(task) and task.email:
try:
self.email_alert(error, task)
except Exception:
self.log.exception("Failed to send email to: %s", task.email)
if callbacks and context:
self._run_finished_callback(callbacks, context, callback_type)
if not test_mode:
session.merge(self)
session.flush()
def is_eligible_to_retry(self):
"""Is task instance is eligible for retry."""
if self.state == TaskInstanceState.RESTARTING:
# If a task is cleared when running, it goes into RESTARTING state and is always
# eligible for retry
return True
if not getattr(self, "task", None):
# Couldn't load the task, don't know number of retries, guess:
return self.try_number <= self.max_tries
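        # Note: task.retries evaluating falsy (e.g. 0) makes the task ineligible for
        # retry regardless of max_tries.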
return self.task.retries and self.try_number <= self.max_tries
def get_template_context(
self,
session: Session | None = None,
ignore_param_exceptions: bool = True,
) -> Context:
"""Return TI Context."""
# Do not use provide_session here -- it expunges everything on exit!
if not session:
session = settings.Session()
from airflow import macros
from airflow.models.abstractoperator import NotMapped
integrate_macros_plugins()
task = self.task
if TYPE_CHECKING:
assert task.dag
dag: DAG = task.dag
dag_run = self.get_dagrun(session)
data_interval = dag.get_run_data_interval(dag_run)
validated_params = process_params(dag, task, dag_run, suppress_exception=ignore_param_exceptions)
logical_date = timezone.coerce_datetime(self.execution_date)
ds = logical_date.strftime("%Y-%m-%d")
ds_nodash = ds.replace("-", "")
ts = logical_date.isoformat()
ts_nodash = logical_date.strftime("%Y%m%dT%H%M%S")
ts_nodash_with_tz = ts.replace("-", "").replace(":", "")
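        # e.g. a logical_date of 2023-01-01T00:00:00+00:00 yields ds="2023-01-01",
        # ds_nodash="20230101", ts="2023-01-01T00:00:00+00:00",
        # ts_nodash="20230101T000000" and ts_nodash_with_tz="20230101T000000+0000".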
@cache # Prevent multiple database access.
def _get_previous_dagrun_success() -> DagRun | None:
return self.get_previous_dagrun(state=DagRunState.SUCCESS, session=session)
def _get_previous_dagrun_data_interval_success() -> DataInterval | None:
dagrun = _get_previous_dagrun_success()
if dagrun is None:
return None
return dag.get_run_data_interval(dagrun)
def get_prev_data_interval_start_success() -> pendulum.DateTime | None:
data_interval = _get_previous_dagrun_data_interval_success()
if data_interval is None:
return None
return data_interval.start
def get_prev_data_interval_end_success() -> pendulum.DateTime | None:
data_interval = _get_previous_dagrun_data_interval_success()
if data_interval is None:
return None
return data_interval.end
def get_prev_start_date_success() -> pendulum.DateTime | None:
dagrun = _get_previous_dagrun_success()
if dagrun is None:
return None
return timezone.coerce_datetime(dagrun.start_date)
@cache
def get_yesterday_ds() -> str:
return (logical_date - timedelta(1)).strftime("%Y-%m-%d")
def get_yesterday_ds_nodash() -> str:
return get_yesterday_ds().replace("-", "")
@cache
def get_tomorrow_ds() -> str:
return (logical_date + timedelta(1)).strftime("%Y-%m-%d")
def get_tomorrow_ds_nodash() -> str:
return get_tomorrow_ds().replace("-", "")
@cache
def get_next_execution_date() -> pendulum.DateTime | None:
# For manually triggered dagruns that aren't run on a schedule,
# the "next" execution date doesn't make sense, and should be set
# to execution date for consistency with how execution_date is set
# for manually triggered tasks, i.e. triggered_date == execution_date.
if dag_run.external_trigger:
return logical_date
if dag is None:
return None
next_info = dag.next_dagrun_info(data_interval, restricted=False)
if next_info is None:
return None
return timezone.coerce_datetime(next_info.logical_date)
def get_next_ds() -> str | None:
execution_date = get_next_execution_date()
if execution_date is None:
return None
return execution_date.strftime("%Y-%m-%d")
def get_next_ds_nodash() -> str | None:
ds = get_next_ds()
if ds is None:
return ds
return ds.replace("-", "")
@cache
def get_prev_execution_date():
# For manually triggered dagruns that aren't run on a schedule,
# the "previous" execution date doesn't make sense, and should be set
# to execution date for consistency with how execution_date is set
# for manually triggered tasks, i.e. triggered_date == execution_date.
if dag_run.external_trigger:
return logical_date
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return dag.previous_schedule(logical_date)
@cache
def get_prev_ds() -> str | None:
execution_date = get_prev_execution_date()
if execution_date is None:
return None
return execution_date.strftime(r"%Y-%m-%d")
def get_prev_ds_nodash() -> str | None:
prev_ds = get_prev_ds()
if prev_ds is None:
return None
return prev_ds.replace("-", "")
def get_triggering_events() -> dict[str, list[DatasetEvent]]:
if TYPE_CHECKING:
assert session is not None
# The dag_run may not be attached to the session anymore since the
# code base is over-zealous with use of session.expunge_all().
# Re-attach it if we get called.
nonlocal dag_run
if dag_run not in session:
dag_run = session.merge(dag_run, load=False)
dataset_events = dag_run.consumed_dataset_events
triggering_events: dict[str, list[DatasetEvent]] = defaultdict(list)
for event in dataset_events:
triggering_events[event.dataset.uri].append(event)
return triggering_events
try:
expanded_ti_count: int | None = task.get_mapped_ti_count(self.run_id, session=session)
except NotMapped:
expanded_ti_count = None
# NOTE: If you add anything to this dict, make sure to also update the
# definition in airflow/utils/context.pyi, and KNOWN_CONTEXT_KEYS in
# airflow/utils/context.py!
context = {
"conf": conf,
"dag": dag,
"dag_run": dag_run,
"data_interval_end": timezone.coerce_datetime(data_interval.end),
"data_interval_start": timezone.coerce_datetime(data_interval.start),
"ds": ds,
"ds_nodash": ds_nodash,
"execution_date": logical_date,
"expanded_ti_count": expanded_ti_count,
"inlets": task.inlets,
"logical_date": logical_date,
"macros": macros,
"next_ds": get_next_ds(),
"next_ds_nodash": get_next_ds_nodash(),
"next_execution_date": get_next_execution_date(),
"outlets": task.outlets,
"params": validated_params,
"prev_data_interval_start_success": get_prev_data_interval_start_success(),
"prev_data_interval_end_success": get_prev_data_interval_end_success(),
"prev_ds": get_prev_ds(),
"prev_ds_nodash": get_prev_ds_nodash(),
"prev_execution_date": get_prev_execution_date(),
"prev_execution_date_success": self.get_previous_execution_date(
state=DagRunState.SUCCESS,
session=session,
),
"prev_start_date_success": get_prev_start_date_success(),
"run_id": self.run_id,
"task": task,
"task_instance": self,
"task_instance_key_str": f"{task.dag_id}__{task.task_id}__{ds_nodash}",
"test_mode": self.test_mode,
"ti": self,
"tomorrow_ds": get_tomorrow_ds(),
"tomorrow_ds_nodash": get_tomorrow_ds_nodash(),
"triggering_dataset_events": lazy_object_proxy.Proxy(get_triggering_events),
"ts": ts,
"ts_nodash": ts_nodash,
"ts_nodash_with_tz": ts_nodash_with_tz,
"var": {
"json": VariableAccessor(deserialize_json=True),
"value": VariableAccessor(deserialize_json=False),
},
"conn": ConnectionAccessor(),
"yesterday_ds": get_yesterday_ds(),
"yesterday_ds_nodash": get_yesterday_ds_nodash(),
}
# Mypy doesn't like turning existing dicts in to a TypeDict -- and we "lie" in the type stub to say it
# is one, but in practice it isn't. See https://github.com/python/mypy/issues/8890
return Context(context) # type: ignore
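    # Illustrative sketch (editor note, not part of Airflow): the dict built above is what
    # Jinja templating resolves against. A hypothetical DAG file could reference these keys
    # in any templated operator field, e.g.:
    #
    #     BashOperator(
    #         task_id="print_interval",
    #         bash_command="echo run_id={{ run_id }} start={{ data_interval_start }} end={{ data_interval_end }}",
    #     )
    #
    # or access the same context inside a @task-decorated callable via get_current_context().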
@provide_session
def get_rendered_template_fields(self, session: Session = NEW_SESSION) -> None:
"""
Update task with rendered template fields for presentation in UI.
If task has already run, will fetch from DB; otherwise will render.
"""
from airflow.models.renderedtifields import RenderedTaskInstanceFields
rendered_task_instance_fields = RenderedTaskInstanceFields.get_templated_fields(self, session=session)
if rendered_task_instance_fields:
self.task = self.task.unmap(None)
for field_name, rendered_value in rendered_task_instance_fields.items():
setattr(self.task, field_name, rendered_value)
return
try:
# If we get here, either the task hasn't run or the RTIF record was purged.
from airflow.utils.log.secrets_masker import redact
self.render_templates()
for field_name in self.task.template_fields:
rendered_value = getattr(self.task, field_name)
setattr(self.task, field_name, redact(rendered_value, field_name))
except (TemplateAssertionError, UndefinedError) as e:
raise AirflowException(
"Webserver does not have access to User-defined Macros or Filters "
"when Dag Serialization is enabled. Hence for the task that have not yet "
"started running, please use 'airflow tasks render' for debugging the "
"rendering of template_fields."
) from e
@provide_session
def get_rendered_k8s_spec(self, session: Session = NEW_SESSION):
"""Fetch rendered template fields from DB."""
from airflow.models.renderedtifields import RenderedTaskInstanceFields
rendered_k8s_spec = RenderedTaskInstanceFields.get_k8s_pod_yaml(self, session=session)
if not rendered_k8s_spec:
try:
rendered_k8s_spec = self.render_k8s_pod_yaml()
except (TemplateAssertionError, UndefinedError) as e:
raise AirflowException(f"Unable to render a k8s spec for this taskinstance: {e}") from e
return rendered_k8s_spec
def overwrite_params_with_dag_run_conf(self, params, dag_run):
"""Overwrite Task Params with DagRun.conf."""
if dag_run and dag_run.conf:
self.log.debug("Updating task params (%s) with DagRun.conf (%s)", params, dag_run.conf)
params.update(dag_run.conf)
def render_templates(self, context: Context | None = None) -> Operator:
"""Render templates in the operator fields.
If the task was originally mapped, this may replace ``self.task`` with
the unmapped, fully rendered BaseOperator. The original ``self.task``
before replacement is returned.
"""
if not context:
context = self.get_template_context()
original_task = self.task
# If self.task is mapped, this call replaces self.task to point to the
# unmapped BaseOperator created by this function! This is because the
# MappedOperator is useless for template rendering, and we need to be
# able to access the unmapped task instead.
original_task.render_template_fields(context)
return original_task
def render_k8s_pod_yaml(self) -> dict | None:
"""Render k8s pod yaml."""
from kubernetes.client.api_client import ApiClient
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id # Circular import
from airflow.kubernetes.pod_generator import PodGenerator
kube_config = KubeConfig()
pod = PodGenerator.construct_pod(
dag_id=self.dag_id,
run_id=self.run_id,
task_id=self.task_id,
map_index=self.map_index,
date=None,
pod_id=create_pod_id(self.dag_id, self.task_id),
try_number=self.try_number,
kube_image=kube_config.kube_image,
args=self.command_as_list(),
pod_override_object=PodGenerator.from_obj(self.executor_config),
scheduler_job_id="0",
namespace=kube_config.executor_namespace,
base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file),
with_mutation_hook=True,
)
sanitized_pod = ApiClient().sanitize_for_serialization(pod)
return sanitized_pod
def get_email_subject_content(
self, exception: BaseException, task: BaseOperator | None = None
) -> tuple[str, str, str]:
"""Get the email subject content for exceptions."""
# For a ti from DB (without ti.task), return the default value
if task is None:
task = getattr(self, "task")
use_default = task is None
exception_html = str(exception).replace("\n", "<br>")
default_subject = "Airflow alert: {{ti}}"
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Try 1 instead of
# Try 0 for the first attempt).
default_html_content = (
"Try {{try_number}} out of {{max_tries + 1}}<br>"
"Exception:<br>{{exception_html}}<br>"
'Log: <a href="{{ti.log_url}}">Link</a><br>'
"Host: {{ti.hostname}}<br>"
'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
default_html_content_err = (
"Try {{try_number}} out of {{max_tries + 1}}<br>"
"Exception:<br>Failed attempt to attach error logs<br>"
'Log: <a href="{{ti.log_url}}">Link</a><br>'
"Host: {{ti.hostname}}<br>"
'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
# This function is called after changing the state from RUNNING,
# so we need to subtract 1 from self.try_number here.
current_try_number = self.try_number - 1
additional_context: dict[str, Any] = {
"exception": exception,
"exception_html": exception_html,
"try_number": current_try_number,
"max_tries": self.max_tries,
}
if use_default:
default_context = {"ti": self, **additional_context}
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=True
)
subject = jinja_env.from_string(default_subject).render(**default_context)
html_content = jinja_env.from_string(default_html_content).render(**default_context)
html_content_err = jinja_env.from_string(default_html_content_err).render(**default_context)
else:
# Use the DAG's get_template_env() to set force_sandboxed. Don't add
# the flag to the function on task object -- that function can be
# overridden, and adding a flag breaks backward compatibility.
dag = self.task.get_dag()
if dag:
jinja_env = dag.get_template_env(force_sandboxed=True)
else:
jinja_env = SandboxedEnvironment(cache_size=0)
jinja_context = self.get_template_context()
context_merge(jinja_context, additional_context)
def render(key: str, content: str) -> str:
if conf.has_option("email", key):
path = conf.get_mandatory_value("email", key)
try:
with open(path) as f:
content = f.read()
except FileNotFoundError:
self.log.warning(f"Could not find email template file '{path!r}'. Using defaults...")
except OSError:
self.log.exception(f"Error while using email template '{path!r}'. Using defaults...")
return render_template_to_string(jinja_env.from_string(content), jinja_context)
subject = render("subject_template", default_subject)
html_content = render("html_content_template", default_html_content)
html_content_err = render("html_content_template", default_html_content_err)
return subject, html_content, html_content_err
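    # Illustrative sketch (editor note, not part of Airflow): the render() helper above lets
    # deployments override the alert templates through the [email] section of airflow.cfg,
    # e.g. (file paths are hypothetical):
    #
    #     [email]
    #     subject_template = /opt/airflow/config/email_subject.jinja2
    #     html_content_template = /opt/airflow/config/email_html_content.jinja2
    #
    # Each template file is rendered against the task-instance template context plus the
    # extra keys added in additional_context (exception, try_number, max_tries, ...).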
def email_alert(self, exception, task: BaseOperator) -> None:
"""Send alert email with exception information."""
subject, html_content, html_content_err = self.get_email_subject_content(exception, task=task)
assert task.email
try:
send_email(task.email, subject, html_content)
except Exception:
send_email(task.email, subject, html_content_err)
def set_duration(self) -> None:
"""Set TI duration."""
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).total_seconds()
else:
self.duration = None
self.log.debug("Task Duration set to %s", self.duration)
def _record_task_map_for_downstreams(self, task: Operator, value: Any, *, session: Session) -> None:
if next(task.iter_mapped_dependants(), None) is None: # No mapped dependants, no need to validate.
return
# TODO: We don't push TaskMap for mapped task instances because it's not
# currently possible for a downstream to depend on one individual mapped
# task instance. This will change when we implement task mapping inside
# a mapped task group, and we'll need to further analyze the case.
if isinstance(task, MappedOperator):
return
if value is None:
raise XComForMappingNotPushed()
if not _is_mappable_value(value):
raise UnmappableXComTypePushed(value)
task_map = TaskMap.from_task_instance_xcom(self, value)
max_map_length = conf.getint("core", "max_map_length", fallback=1024)
if task_map.length > max_map_length:
raise UnmappableXComLengthPushed(value, max_map_length)
session.merge(task_map)
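    # Illustrative sketch (editor note, not part of Airflow): the checks above enforce that an
    # upstream of an .expand() call returns a mappable value (list/dict) whose length stays
    # under [core] max_map_length. A hypothetical DAG fragment that exercises this path:
    #
    #     @task
    #     def make_batches() -> list[int]:
    #         return [1, 2, 3]          # stored as a TaskMap with length 3
    #
    #     @task
    #     def process(batch: int): ...
    #
    #     process.expand(batch=make_batches())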
@provide_session
def xcom_push(
self,
key: str,
value: Any,
execution_date: datetime | None = None,
session: Session = NEW_SESSION,
) -> None:
"""
Make an XCom available for tasks to pull.
:param key: Key to store the value under.
:param value: Value to store. What types are possible depends on whether
``enable_xcom_pickling`` is true or not. If so, this can be any
            picklable object; otherwise only JSON-serializable values may be used.
:param execution_date: Deprecated parameter that has no effect.
"""
if execution_date is not None:
self_execution_date = self.get_dagrun(session).execution_date
if execution_date < self_execution_date:
raise ValueError(
f"execution_date can not be in the past (current execution_date is "
f"{self_execution_date}; received {execution_date})"
)
            elif execution_date is not None:
                message = "Passing 'execution_date' to 'TaskInstance.xcom_push()' is deprecated."
                warnings.warn(message, RemovedInAirflow3Warning, stacklevel=3)
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
run_id=self.run_id,
map_index=self.map_index,
session=session,
)
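    # Illustrative sketch (editor note, not part of Airflow): typical use from inside a task,
    # e.g. in a PythonOperator callable that receives the task instance through the context
    # (names below are hypothetical):
    #
    #     def extract(ti, **context):
    #         ti.xcom_push(key="row_count", value=42)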
@provide_session
def xcom_pull(
self,
task_ids: str | Iterable[str] | None = None,
dag_id: str | None = None,
key: str = XCOM_RETURN_KEY,
include_prior_dates: bool = False,
session: Session = NEW_SESSION,
*,
map_indexes: int | Iterable[int] | None = None,
default: Any = None,
) -> Any:
"""Pull XComs that optionally meet certain criteria.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is ``'return_value'``, also
available as constant ``XCOM_RETURN_KEY``. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass *None*.
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Pass *None* to remove the filter.
:param dag_id: If provided, only pulls XComs from this DAG. If *None*
(default), the DAG of the calling task is used.
:param map_indexes: If provided, only pull XComs with matching indexes.
If *None* (default), this is inferred from the task(s) being pulled
(see below for details).
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If *True*, XComs from previous dates
are returned as well.
When pulling one single task (``task_id`` is *None* or a str) without
specifying ``map_indexes``, the return value is inferred from whether
        the specified task is mapped. If not, the value from the single task
        instance is returned. If the task to pull is mapped, an iterator (not a
        list) yielding XComs from mapped task instances is returned. In either
        case, ``default`` (*None* if not specified) is returned if no matching
        XComs are found.
        When pulling multiple tasks (i.e. either ``task_id`` or ``map_index`` is
        a non-str iterable), a list of matching XComs is returned. Elements in
        the list are ordered by item ordering in ``task_id`` and ``map_index``.
"""
if dag_id is None:
dag_id = self.dag_id
query = XCom.get_many(
key=key,
run_id=self.run_id,
dag_ids=dag_id,
task_ids=task_ids,
map_indexes=map_indexes,
include_prior_dates=include_prior_dates,
session=session,
)
# NOTE: Since we're only fetching the value field and not the whole
# class, the @recreate annotation does not kick in. Therefore we need to
# call XCom.deserialize_value() manually.
# We are only pulling one single task.
if (task_ids is None or isinstance(task_ids, str)) and not isinstance(map_indexes, Iterable):
first = query.with_entities(
XCom.run_id, XCom.task_id, XCom.dag_id, XCom.map_index, XCom.value
).first()
if first is None: # No matching XCom at all.
return default
if map_indexes is not None or first.map_index < 0:
return XCom.deserialize_value(first)
query = query.order_by(None).order_by(XCom.map_index.asc())
return LazyXComAccess.build_from_xcom_query(query)
# At this point either task_ids or map_indexes is explicitly multi-value.
# Order return values to match task_ids and map_indexes ordering.
query = query.order_by(None)
if task_ids is None or isinstance(task_ids, str):
query = query.order_by(XCom.task_id)
else:
task_id_whens = {tid: i for i, tid in enumerate(task_ids)}
if task_id_whens:
query = query.order_by(case(task_id_whens, value=XCom.task_id))
else:
query = query.order_by(XCom.task_id)
if map_indexes is None or isinstance(map_indexes, int):
query = query.order_by(XCom.map_index)
elif isinstance(map_indexes, range):
order = XCom.map_index
if map_indexes.step < 0:
order = order.desc()
query = query.order_by(order)
else:
map_index_whens = {map_index: i for i, map_index in enumerate(map_indexes)}
if map_index_whens:
query = query.order_by(case(map_index_whens, value=XCom.map_index))
else:
query = query.order_by(XCom.map_index)
return LazyXComAccess.build_from_xcom_query(query)
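    # Illustrative sketch (editor note, not part of Airflow): the matching pull for the push
    # shown above, again from a task callable (task ids are hypothetical):
    #
    #     def load(ti, **context):
    #         count = ti.xcom_pull(task_ids="extract", key="row_count")      # single value
    #         results = ti.xcom_pull(task_ids=["extract_a", "extract_b"])    # list, ordered as given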
@provide_session
def get_num_running_task_instances(self, session: Session, same_dagrun=False) -> int:
"""Return Number of running TIs from the DB."""
# .count() is inefficient
num_running_task_instances_query = session.query(func.count()).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.state == TaskInstanceState.RUNNING,
)
if same_dagrun:
num_running_task_instances_query = num_running_task_instances_query.filter(
TaskInstance.run_id == self.run_id
)
return num_running_task_instances_query.scalar()
def init_run_context(self, raw: bool = False) -> None:
"""Sets the log context."""
self.raw = raw
self._set_context(self)
@staticmethod
def filter_for_tis(tis: Iterable[TaskInstance | TaskInstanceKey]) -> BooleanClauseList | None:
"""Returns SQLAlchemy filter to query selected task instances."""
# DictKeys type, (what we often pass here from the scheduler) is not directly indexable :(
# Or it might be a generator, but we need to be able to iterate over it more than once
tis = list(tis)
if not tis:
return None
first = tis[0]
dag_id = first.dag_id
run_id = first.run_id
map_index = first.map_index
first_task_id = first.task_id
# pre-compute the set of dag_id, run_id, map_indices and task_ids
dag_ids, run_ids, map_indices, task_ids = set(), set(), set(), set()
for t in tis:
dag_ids.add(t.dag_id)
run_ids.add(t.run_id)
map_indices.add(t.map_index)
task_ids.add(t.task_id)
# Common path optimisations: when all TIs are for the same dag_id and run_id, or same dag_id
# and task_id -- this can be over 150x faster for huge numbers of TIs (20k+)
if dag_ids == {dag_id} and run_ids == {run_id} and map_indices == {map_index}:
return and_(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index == map_index,
TaskInstance.task_id.in_(task_ids),
)
if dag_ids == {dag_id} and task_ids == {first_task_id} and map_indices == {map_index}:
return and_(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id.in_(run_ids),
TaskInstance.map_index == map_index,
TaskInstance.task_id == first_task_id,
)
if dag_ids == {dag_id} and run_ids == {run_id} and task_ids == {first_task_id}:
return and_(
TaskInstance.dag_id == dag_id,
TaskInstance.run_id == run_id,
TaskInstance.map_index.in_(map_indices),
TaskInstance.task_id == first_task_id,
)
filter_condition = []
# create 2 nested groups, both primarily grouped by dag_id and run_id,
# and in the nested group 1 grouped by task_id the other by map_index.
task_id_groups: dict[tuple, dict[Any, list[Any]]] = defaultdict(lambda: defaultdict(list))
map_index_groups: dict[tuple, dict[Any, list[Any]]] = defaultdict(lambda: defaultdict(list))
for t in tis:
task_id_groups[(t.dag_id, t.run_id)][t.task_id].append(t.map_index)
map_index_groups[(t.dag_id, t.run_id)][t.map_index].append(t.task_id)
        # This assumes that most dags have dag_id as the largest grouping, followed by run_id. Even
        # if it's not, this is still a significant optimization over querying for every single tuple key.
for cur_dag_id in dag_ids:
for cur_run_id in run_ids:
# we compare the group size between task_id and map_index and use the smaller group
dag_task_id_groups = task_id_groups[(cur_dag_id, cur_run_id)]
dag_map_index_groups = map_index_groups[(cur_dag_id, cur_run_id)]
if len(dag_task_id_groups) <= len(dag_map_index_groups):
for cur_task_id, cur_map_indices in dag_task_id_groups.items():
filter_condition.append(
and_(
TaskInstance.dag_id == cur_dag_id,
TaskInstance.run_id == cur_run_id,
TaskInstance.task_id == cur_task_id,
TaskInstance.map_index.in_(cur_map_indices),
)
)
else:
for cur_map_index, cur_task_ids in dag_map_index_groups.items():
filter_condition.append(
and_(
TaskInstance.dag_id == cur_dag_id,
TaskInstance.run_id == cur_run_id,
TaskInstance.task_id.in_(cur_task_ids),
TaskInstance.map_index == cur_map_index,
)
)
return or_(*filter_condition)
@classmethod
def ti_selector_condition(cls, vals: Collection[str | tuple[str, int]]) -> ColumnOperators:
"""
        Build an SQLAlchemy filter for a list of values, where each element is
        either a task_id, or a tuple of (task_id, map_index).
:meta private:
"""
# Compute a filter for TI.task_id and TI.map_index based on input values
# For each item, it will either be a task_id, or (task_id, map_index)
task_id_only = [v for v in vals if isinstance(v, str)]
with_map_index = [v for v in vals if not isinstance(v, str)]
filters: list[ColumnOperators] = []
if task_id_only:
filters.append(cls.task_id.in_(task_id_only))
if with_map_index:
filters.append(tuple_in_condition((cls.task_id, cls.map_index), with_map_index))
if not filters:
return false()
if len(filters) == 1:
return filters[0]
return or_(*filters)
@Sentry.enrich_errors
@provide_session
def schedule_downstream_tasks(self, session: Session = NEW_SESSION, max_tis_per_query: int | None = None):
"""
The mini-scheduler for scheduling downstream tasks of this task instance.
:meta: private
"""
from sqlalchemy.exc import OperationalError
from airflow.models import DagRun
try:
# Re-select the row with a lock
dag_run = with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
run_id=self.run_id,
),
session=session,
).one()
task = self.task
if TYPE_CHECKING:
assert task.dag
# Get a partial DAG with just the specific tasks we want to examine.
# In order for dep checks to work correctly, we include ourself (so
# TriggerRuleDep can check the state of the task we just executed).
partial_dag = task.dag.partial_subset(
task.downstream_task_ids,
include_downstream=True,
include_upstream=False,
include_direct_upstream=True,
)
dag_run.dag = partial_dag
info = dag_run.task_instance_scheduling_decisions(session)
skippable_task_ids = {
task_id for task_id in partial_dag.task_ids if task_id not in task.downstream_task_ids
}
schedulable_tis = [
ti
for ti in info.schedulable_tis
if ti.task_id not in skippable_task_ids
and not (
ti.task.inherits_from_empty_operator
and not ti.task.on_execute_callback
and not ti.task.on_success_callback
and not ti.task.outlets
)
]
for schedulable_ti in schedulable_tis:
if not hasattr(schedulable_ti, "task"):
schedulable_ti.task = task.dag.get_task(schedulable_ti.task_id)
num = dag_run.schedule_tis(schedulable_tis, session=session, max_tis_per_query=max_tis_per_query)
self.log.info("%d downstream tasks scheduled from follow-on schedule check", num)
session.flush()
except OperationalError as e:
# Any kind of DB error here is _non fatal_ as this block is just an optimisation.
self.log.info(
"Skipping mini scheduling run due to exception: %s",
e.statement,
exc_info=True,
)
session.rollback()
def get_relevant_upstream_map_indexes(
self,
upstream: Operator,
ti_count: int | None,
*,
session: Session,
) -> int | range | None:
"""Infer the map indexes of an upstream "relevant" to this ti.
The bulk of the logic mainly exists to solve the problem described by
the following example, where 'val' must resolve to different values,
depending on where the reference is being used::
@task
def this_task(v): # This is self.task.
return v * 2
@task_group
def tg1(inp):
val = upstream(inp) # This is the upstream task.
this_task(val) # When inp is 1, val here should resolve to 2.
return val
# This val is the same object returned by tg1.
val = tg1.expand(inp=[1, 2, 3])
@task_group
def tg2(inp):
another_task(inp, val) # val here should resolve to [2, 4, 6].
tg2.expand(inp=["a", "b"])
The surrounding mapped task groups of ``upstream`` and ``self.task`` are
inspected to find a common "ancestor". If such an ancestor is found,
we need to return specific map indexes to pull a partial value from
upstream XCom.
:param upstream: The referenced upstream task.
        :param ti_count: The total count of task instances this task was expanded
            into by the scheduler, i.e. ``expanded_ti_count`` in the template context.
        :return: Specific map index or map indexes to pull, or ``None`` if we
            want the "whole" return value (i.e. no mapped task groups involved).
"""
        # This value should never be None since we already know the current task
        # is in a mapped task group, and should have been expanded. The check is
        # still needed to satisfy Mypy. The value can also be 0 when we expand an
        # empty list, so we must check for falsiness (not just None) to avoid
        # dividing by zero below.
if not ti_count:
return None
# Find the innermost common mapped task group between the current task
# If the current task and the referenced task does not have a common
# mapped task group, the two are in different task mapping contexts
# (like another_task above), and we should use the "whole" value.
common_ancestor = _find_common_ancestor_mapped_group(self.task, upstream)
if common_ancestor is None:
return None
# At this point we know the two tasks share a mapped task group, and we
# should use a "partial" value. Let's break down the mapped ti count
# between the ancestor and further expansion happened inside it.
ancestor_ti_count = common_ancestor.get_mapped_ti_count(self.run_id, session=session)
ancestor_map_index = self.map_index * ancestor_ti_count // ti_count
# If the task is NOT further expanded inside the common ancestor, we
# only want to reference one single ti. We must walk the actual DAG,
# and "ti_count == ancestor_ti_count" does not work, since the further
# expansion may be of length 1.
if not _is_further_mapped_inside(upstream, common_ancestor):
return ancestor_map_index
# Otherwise we need a partial aggregation for values from selected task
# instances in the ancestor's expansion context.
further_count = ti_count // ancestor_ti_count
map_index_start = ancestor_map_index * further_count
return range(map_index_start, map_index_start + further_count)
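    # Worked example (editor note, not part of Airflow): with a mapped task group expanded
    # 3 times (ancestor_ti_count == 3) and a further expansion of 2 inside it (ti_count == 6),
    # the ti with map_index == 4 computes:
    #     ancestor_map_index = 4 * 3 // 6 = 2
    #     further_count      = 6 // 3     = 2
    #     returned range     = range(2 * 2, 2 * 2 + 2) == range(4, 6)
    # i.e. it pulls only the upstream expansions belonging to the third ancestor instance.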
def clear_db_references(self, session):
"""
Clear db tables that have a reference to this instance.
:param session: ORM Session
:meta private:
"""
from airflow.models.renderedtifields import RenderedTaskInstanceFields
tables = [TaskFail, TaskInstanceNote, TaskReschedule, XCom, RenderedTaskInstanceFields]
for table in tables:
session.execute(
delete(table).where(
table.dag_id == self.dag_id,
table.task_id == self.task_id,
table.run_id == self.run_id,
table.map_index == self.map_index,
)
)
def _find_common_ancestor_mapped_group(node1: Operator, node2: Operator) -> MappedTaskGroup | None:
"""Given two operators, find their innermost common mapped task group."""
if node1.dag is None or node2.dag is None or node1.dag_id != node2.dag_id:
return None
parent_group_ids = {g.group_id for g in node1.iter_mapped_task_groups()}
common_groups = (g for g in node2.iter_mapped_task_groups() if g.group_id in parent_group_ids)
return next(common_groups, None)
def _is_further_mapped_inside(operator: Operator, container: TaskGroup) -> bool:
"""Whether given operator is *further* mapped inside a task group."""
if isinstance(operator, MappedOperator):
return True
task_group = operator.task_group
while task_group is not None and task_group.group_id != container.group_id:
if isinstance(task_group, MappedTaskGroup):
return True
task_group = task_group.parent_group
return False
# State of the task instance.
# Stores string version of the task state.
TaskInstanceStateType = Tuple[TaskInstanceKey, TaskInstanceState]
class SimpleTaskInstance:
"""
Simplified Task Instance.
Used to send data between processes via Queues.
"""
def __init__(
self,
dag_id: str,
task_id: str,
run_id: str,
start_date: datetime | None,
end_date: datetime | None,
try_number: int,
map_index: int,
state: str,
executor_config: Any,
pool: str,
queue: str,
key: TaskInstanceKey,
run_as_user: str | None = None,
priority_weight: int | None = None,
):
self.dag_id = dag_id
self.task_id = task_id
self.run_id = run_id
self.map_index = map_index
self.start_date = start_date
self.end_date = end_date
self.try_number = try_number
self.state = state
self.executor_config = executor_config
self.run_as_user = run_as_user
self.pool = pool
self.priority_weight = priority_weight
self.queue = queue
self.key = key
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def as_dict(self):
warnings.warn(
"This method is deprecated. Use BaseSerialization.serialize.",
RemovedInAirflow3Warning,
stacklevel=2,
)
new_dict = dict(self.__dict__)
for key in new_dict:
if key in ["start_date", "end_date"]:
val = new_dict[key]
if not val or isinstance(val, str):
continue
new_dict.update({key: val.isoformat()})
return new_dict
@classmethod
def from_ti(cls, ti: TaskInstance) -> SimpleTaskInstance:
return cls(
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=ti.run_id,
map_index=ti.map_index,
start_date=ti.start_date,
end_date=ti.end_date,
try_number=ti.try_number,
state=ti.state,
executor_config=ti.executor_config,
pool=ti.pool,
queue=ti.queue,
key=ti.key,
run_as_user=ti.run_as_user if hasattr(ti, "run_as_user") else None,
priority_weight=ti.priority_weight if hasattr(ti, "priority_weight") else None,
)
@classmethod
def from_dict(cls, obj_dict: dict) -> SimpleTaskInstance:
warnings.warn(
"This method is deprecated. Use BaseSerialization.deserialize.",
RemovedInAirflow3Warning,
stacklevel=2,
)
ti_key = TaskInstanceKey(*obj_dict.pop("key"))
start_date = None
end_date = None
start_date_str: str | None = obj_dict.pop("start_date")
end_date_str: str | None = obj_dict.pop("end_date")
if start_date_str:
start_date = timezone.parse(start_date_str)
if end_date_str:
end_date = timezone.parse(end_date_str)
return cls(**obj_dict, start_date=start_date, end_date=end_date, key=ti_key)
class TaskInstanceNote(Base):
"""For storage of arbitrary notes concerning the task instance."""
__tablename__ = "task_instance_note"
user_id = Column(Integer, nullable=True)
task_id = Column(StringID(), primary_key=True, nullable=False)
dag_id = Column(StringID(), primary_key=True, nullable=False)
run_id = Column(StringID(), primary_key=True, nullable=False)
map_index = Column(Integer, primary_key=True, nullable=False)
content = Column(String(1000).with_variant(Text(1000), "mysql"))
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
updated_at = Column(UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow, nullable=False)
task_instance = relationship("TaskInstance", back_populates="task_instance_note")
__table_args__ = (
PrimaryKeyConstraint(
"task_id", "dag_id", "run_id", "map_index", name="task_instance_note_pkey", mssql_clustered=True
),
ForeignKeyConstraint(
(dag_id, task_id, run_id, map_index),
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_instance_note_ti_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
(user_id,),
["ab_user.id"],
name="task_instance_note_user_fkey",
),
)
def __init__(self, content, user_id=None):
self.content = content
self.user_id = user_id
def __repr__(self):
prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.task_id} {self.run_id}"
if self.map_index != -1:
prefix += f" map_index={self.map_index}"
return prefix + ">"
STATICA_HACK = True
globals()["kcah_acitats"[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
from airflow.jobs.job import Job
TaskInstance.queued_by_job = relationship(Job)
| 125,676 | 40.178571 | 110 |
py
|
airflow
|
airflow-main/airflow/models/db_callback_request.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from importlib import import_module
from sqlalchemy import Column, Integer, String
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.models.base import Base
from airflow.utils import timezone
from airflow.utils.sqlalchemy import ExtendedJSON, UtcDateTime
class DbCallbackRequest(Base):
"""Used to handle callbacks through database."""
__tablename__ = "callback_request"
id = Column(Integer(), nullable=False, primary_key=True)
created_at = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
priority_weight = Column(Integer(), nullable=False)
callback_data = Column(ExtendedJSON, nullable=False)
callback_type = Column(String(20), nullable=False)
processor_subdir = Column(String(2000), nullable=True)
def __init__(self, priority_weight: int, callback: CallbackRequest):
self.created_at = timezone.utcnow()
self.priority_weight = priority_weight
self.processor_subdir = callback.processor_subdir
self.callback_data = callback.to_json()
self.callback_type = callback.__class__.__name__
def get_callback_request(self) -> CallbackRequest:
module = import_module("airflow.callbacks.callback_requests")
callback_class = getattr(module, self.callback_type)
# Get the function (from the instance) that we need to call
from_json = getattr(callback_class, "from_json")
return from_json(self.callback_data)
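    # Illustrative sketch (editor note, not part of Airflow): round-tripping a callback
    # through this table, assuming an open ORM session named ``session``:
    #
    #     from airflow.callbacks.callback_requests import DagCallbackRequest
    #
    #     request = DagCallbackRequest(
    #         full_filepath="/dags/example.py", dag_id="example", run_id="manual__2023-01-01",
    #         processor_subdir=None, is_failure_callback=True,
    #     )
    #     session.add(DbCallbackRequest(priority_weight=10, callback=request))
    #     ...
    #     restored = session.query(DbCallbackRequest).first().get_callback_request()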
| 2,285 | 40.563636 | 77 |
py
|
airflow
|
airflow-main/airflow/models/baseoperator.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base operator for all operators."""
from __future__ import annotations
import abc
import collections
import collections.abc
import contextlib
import copy
import functools
import logging
import sys
import warnings
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from inspect import signature
from types import ClassMethodDescriptorType, FunctionType
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Collection,
Iterable,
List,
Sequence,
Type,
TypeVar,
Union,
cast,
)
import attr
import pendulum
from dateutil.relativedelta import relativedelta
from sqlalchemy import select
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagInvalidTriggerRule, RemovedInAirflow3Warning, TaskDeferred
from airflow.lineage import apply_lineage, prepare_lineage
from airflow.models.abstractoperator import (
DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST,
DEFAULT_OWNER,
DEFAULT_POOL_SLOTS,
DEFAULT_PRIORITY_WEIGHT,
DEFAULT_QUEUE,
DEFAULT_RETRIES,
DEFAULT_RETRY_DELAY,
DEFAULT_TASK_EXECUTION_TIMEOUT,
DEFAULT_TRIGGER_RULE,
DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING,
DEFAULT_WEIGHT_RULE,
AbstractOperator,
TaskStateChangeCallback,
)
from airflow.models.mappedoperator import OperatorPartial, validate_mapping_kwargs
from airflow.models.param import ParamsDict
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance, clear_task_instances
from airflow.models.taskmixin import DependencyMixin
from airflow.serialization.enums import DagAttributeTypes
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.not_previously_skipped_dep import NotPreviouslySkippedDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.triggers.base import BaseTrigger
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.decorators import fixup_decorator_warning_stack
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.helpers import validate_key
from airflow.utils.operator_resources import Resources
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.setup_teardown import SetupTeardownContext
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET, ArgNotSet
from airflow.utils.weight_rule import WeightRule
from airflow.utils.xcom import XCOM_RETURN_KEY
if TYPE_CHECKING:
import jinja2 # Slow import.
from airflow.models.dag import DAG
from airflow.models.operator import Operator
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
ScheduleInterval = Union[str, timedelta, relativedelta]
TaskPreExecuteHook = Callable[[Context], None]
TaskPostExecuteHook = Callable[[Context, Any], None]
T = TypeVar("T", bound=FunctionType)
logger = logging.getLogger("airflow.models.baseoperator.BaseOperator")
def parse_retries(retries: Any) -> int | None:
if retries is None or isinstance(retries, int):
return retries
try:
parsed_retries = int(retries)
except (TypeError, ValueError):
raise AirflowException(f"'retries' type must be int, not {type(retries).__name__}")
logger.warning("Implicitly converting 'retries' from %r to int", retries)
return parsed_retries
def coerce_timedelta(value: float | timedelta, *, key: str) -> timedelta:
if isinstance(value, timedelta):
return value
logger.debug("%s isn't a timedelta object, assuming secs", key)
return timedelta(seconds=value)
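# Illustrative behaviour of the two helpers above (editor note, not part of Airflow):
#
#     parse_retries(None)      -> None
#     parse_retries("3")       -> 3 (after logging a warning about the implicit conversion)
#     parse_retries("many")    -> raises AirflowException
#     coerce_timedelta(30, key="retry_delay")            -> timedelta(seconds=30)
#     coerce_timedelta(timedelta(minutes=5), key="sla")  -> timedelta(minutes=5) (unchanged)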
def coerce_resources(resources: dict[str, Any] | None) -> Resources | None:
if resources is None:
return None
return Resources(**resources)
def _get_parent_defaults(dag: DAG | None, task_group: TaskGroup | None) -> tuple[dict, ParamsDict]:
if not dag:
return {}, ParamsDict()
dag_args = copy.copy(dag.default_args)
dag_params = copy.deepcopy(dag.params)
if task_group:
if task_group.default_args and not isinstance(task_group.default_args, collections.abc.Mapping):
raise TypeError("default_args must be a mapping")
dag_args.update(task_group.default_args)
return dag_args, dag_params
def get_merged_defaults(
dag: DAG | None,
task_group: TaskGroup | None,
task_params: collections.abc.MutableMapping | None,
task_default_args: dict | None,
) -> tuple[dict, ParamsDict]:
args, params = _get_parent_defaults(dag, task_group)
if task_params:
if not isinstance(task_params, collections.abc.Mapping):
raise TypeError("params must be a mapping")
params.update(task_params)
if task_default_args:
if not isinstance(task_default_args, collections.abc.Mapping):
raise TypeError("default_args must be a mapping")
args.update(task_default_args)
with contextlib.suppress(KeyError):
params.update(task_default_args["params"] or {})
return args, params
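# Illustrative sketch (editor note, not part of Airflow): how the precedence above works out.
# Given a DAG with default_args={"retries": 2, "owner": "data-eng"} and a task created with
# default_args={"retries": 5}, get_merged_defaults() returns args where retries == 5
# (task-level wins) and owner == "data-eng" (inherited from the DAG), plus the merged
# ParamsDict built from dag.params, the task group, and any task-level params.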
class _PartialDescriptor:
"""A descriptor that guards against ``.partial`` being called on Task objects."""
class_method: ClassMethodDescriptorType | None = None
def __get__(
self, obj: BaseOperator, cls: type[BaseOperator] | None = None
) -> Callable[..., OperatorPartial]:
# Call this "partial" so it looks nicer in stack traces.
def partial(**kwargs):
raise TypeError("partial can only be called on Operator classes, not Tasks themselves")
if obj is not None:
return partial
return self.class_method.__get__(cls, cls)
_PARTIAL_DEFAULTS = {
"owner": DEFAULT_OWNER,
"trigger_rule": DEFAULT_TRIGGER_RULE,
"depends_on_past": False,
"ignore_first_depends_on_past": DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST,
"wait_for_past_depends_before_skipping": DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING,
"wait_for_downstream": False,
"retries": DEFAULT_RETRIES,
"queue": DEFAULT_QUEUE,
"pool_slots": DEFAULT_POOL_SLOTS,
"execution_timeout": DEFAULT_TASK_EXECUTION_TIMEOUT,
"retry_delay": DEFAULT_RETRY_DELAY,
"retry_exponential_backoff": False,
"priority_weight": DEFAULT_PRIORITY_WEIGHT,
"weight_rule": DEFAULT_WEIGHT_RULE,
"inlets": [],
"outlets": [],
}
# This is what handles the actual mapping.
def partial(
operator_class: type[BaseOperator],
*,
task_id: str,
dag: DAG | None = None,
task_group: TaskGroup | None = None,
start_date: datetime | ArgNotSet = NOTSET,
end_date: datetime | ArgNotSet = NOTSET,
owner: str | ArgNotSet = NOTSET,
email: None | str | Iterable[str] | ArgNotSet = NOTSET,
params: collections.abc.MutableMapping | None = None,
resources: dict[str, Any] | None | ArgNotSet = NOTSET,
trigger_rule: str | ArgNotSet = NOTSET,
depends_on_past: bool | ArgNotSet = NOTSET,
ignore_first_depends_on_past: bool | ArgNotSet = NOTSET,
wait_for_past_depends_before_skipping: bool | ArgNotSet = NOTSET,
wait_for_downstream: bool | ArgNotSet = NOTSET,
retries: int | None | ArgNotSet = NOTSET,
queue: str | ArgNotSet = NOTSET,
pool: str | ArgNotSet = NOTSET,
pool_slots: int | ArgNotSet = NOTSET,
execution_timeout: timedelta | None | ArgNotSet = NOTSET,
max_retry_delay: None | timedelta | float | ArgNotSet = NOTSET,
retry_delay: timedelta | float | ArgNotSet = NOTSET,
retry_exponential_backoff: bool | ArgNotSet = NOTSET,
priority_weight: int | ArgNotSet = NOTSET,
weight_rule: str | ArgNotSet = NOTSET,
sla: timedelta | None | ArgNotSet = NOTSET,
max_active_tis_per_dag: int | None | ArgNotSet = NOTSET,
max_active_tis_per_dagrun: int | None | ArgNotSet = NOTSET,
on_execute_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] | ArgNotSet = NOTSET,
on_failure_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] | ArgNotSet = NOTSET,
on_success_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] | ArgNotSet = NOTSET,
on_retry_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] | ArgNotSet = NOTSET,
run_as_user: str | None | ArgNotSet = NOTSET,
executor_config: dict | None | ArgNotSet = NOTSET,
inlets: Any | None | ArgNotSet = NOTSET,
outlets: Any | None | ArgNotSet = NOTSET,
doc: str | None | ArgNotSet = NOTSET,
doc_md: str | None | ArgNotSet = NOTSET,
doc_json: str | None | ArgNotSet = NOTSET,
doc_yaml: str | None | ArgNotSet = NOTSET,
doc_rst: str | None | ArgNotSet = NOTSET,
**kwargs,
) -> OperatorPartial:
from airflow.models.dag import DagContext
from airflow.utils.task_group import TaskGroupContext
validate_mapping_kwargs(operator_class, "partial", kwargs)
dag = dag or DagContext.get_current_dag()
if dag:
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
if task_group:
task_id = task_group.child_id(task_id)
# Merge DAG and task group level defaults into user-supplied values.
dag_default_args, partial_params = get_merged_defaults(
dag=dag,
task_group=task_group,
task_params=params,
task_default_args=kwargs.pop("default_args", None),
)
# Create partial_kwargs from args and kwargs
partial_kwargs: dict[str, Any] = {
**kwargs,
"dag": dag,
"task_group": task_group,
"task_id": task_id,
"start_date": start_date,
"end_date": end_date,
"owner": owner,
"email": email,
"trigger_rule": trigger_rule,
"depends_on_past": depends_on_past,
"ignore_first_depends_on_past": ignore_first_depends_on_past,
"wait_for_past_depends_before_skipping": wait_for_past_depends_before_skipping,
"wait_for_downstream": wait_for_downstream,
"retries": retries,
"queue": queue,
"pool": pool,
"pool_slots": pool_slots,
"execution_timeout": execution_timeout,
"max_retry_delay": max_retry_delay,
"retry_delay": retry_delay,
"retry_exponential_backoff": retry_exponential_backoff,
"priority_weight": priority_weight,
"weight_rule": weight_rule,
"sla": sla,
"max_active_tis_per_dag": max_active_tis_per_dag,
"max_active_tis_per_dagrun": max_active_tis_per_dagrun,
"on_execute_callback": on_execute_callback,
"on_failure_callback": on_failure_callback,
"on_retry_callback": on_retry_callback,
"on_success_callback": on_success_callback,
"run_as_user": run_as_user,
"executor_config": executor_config,
"inlets": inlets,
"outlets": outlets,
"resources": resources,
"doc": doc,
"doc_json": doc_json,
"doc_md": doc_md,
"doc_rst": doc_rst,
"doc_yaml": doc_yaml,
}
# Inject DAG-level default args into args provided to this function.
partial_kwargs.update((k, v) for k, v in dag_default_args.items() if partial_kwargs.get(k) is NOTSET)
# Fill fields not provided by the user with default values.
partial_kwargs = {k: _PARTIAL_DEFAULTS.get(k) if v is NOTSET else v for k, v in partial_kwargs.items()}
# Post-process arguments. Should be kept in sync with _TaskDecorator.expand().
if "task_concurrency" in kwargs: # Reject deprecated option.
raise TypeError("unexpected argument: task_concurrency")
if partial_kwargs["wait_for_downstream"]:
partial_kwargs["depends_on_past"] = True
partial_kwargs["start_date"] = timezone.convert_to_utc(partial_kwargs["start_date"])
partial_kwargs["end_date"] = timezone.convert_to_utc(partial_kwargs["end_date"])
if partial_kwargs["pool"] is None:
partial_kwargs["pool"] = Pool.DEFAULT_POOL_NAME
partial_kwargs["retries"] = parse_retries(partial_kwargs["retries"])
partial_kwargs["retry_delay"] = coerce_timedelta(partial_kwargs["retry_delay"], key="retry_delay")
if partial_kwargs["max_retry_delay"] is not None:
partial_kwargs["max_retry_delay"] = coerce_timedelta(
partial_kwargs["max_retry_delay"],
key="max_retry_delay",
)
partial_kwargs["executor_config"] = partial_kwargs["executor_config"] or {}
partial_kwargs["resources"] = coerce_resources(partial_kwargs["resources"])
return OperatorPartial(
operator_class=operator_class,
kwargs=partial_kwargs,
params=partial_params,
)
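# Illustrative sketch (editor note, not part of Airflow internals): the function above backs
# the ``Operator.partial(...).expand(...)`` API used for dynamic task mapping, e.g. with a
# hypothetical DAG:
#
#     BashOperator.partial(task_id="say", retries=1).expand(
#         bash_command=["echo a", "echo b", "echo c"],
#     )
#
# Everything passed to .partial() becomes a constant shared by every expanded task instance,
# while each argument passed to .expand() must be a mappable collection.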
class BaseOperatorMeta(abc.ABCMeta):
"""Metaclass of BaseOperator."""
@classmethod
def _apply_defaults(cls, func: T) -> T:
"""
        Function decorator that looks for an argument named "default_args", and
        fills the unspecified arguments from it.
        Since python2.* isn't clear about which arguments are missing when
        calling a function, and this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments.
"""
# Cache inspect.signature for the wrapper closure to avoid calling it
# at every decorated invocation. This is separate sig_cache created
# per decoration, i.e. each function decorated using apply_defaults will
# have a different sig_cache.
sig_cache = signature(func)
non_variadic_params = {
name: param
for (name, param) in sig_cache.parameters.items()
if param.name != "self" and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
}
non_optional_args = {
name
for name, param in non_variadic_params.items()
if param.default == param.empty and name != "task_id"
}
fixup_decorator_warning_stack(func)
@functools.wraps(func)
def apply_defaults(self: BaseOperator, *args: Any, **kwargs: Any) -> Any:
from airflow.models.dag import DagContext
from airflow.utils.task_group import TaskGroupContext
if len(args) > 0:
raise AirflowException("Use keyword arguments when initializing operators")
instantiated_from_mapped = kwargs.pop(
"_airflow_from_mapped",
getattr(self, "_BaseOperator__from_mapped", False),
)
dag: DAG | None = kwargs.get("dag") or DagContext.get_current_dag()
task_group: TaskGroup | None = kwargs.get("task_group")
if dag and not task_group:
task_group = TaskGroupContext.get_current_task_group(dag)
default_args, merged_params = get_merged_defaults(
dag=dag,
task_group=task_group,
task_params=kwargs.pop("params", None),
task_default_args=kwargs.pop("default_args", None),
)
for arg in sig_cache.parameters:
if arg not in kwargs and arg in default_args:
kwargs[arg] = default_args[arg]
missing_args = non_optional_args - set(kwargs)
if len(missing_args) == 1:
raise AirflowException(f"missing keyword argument {missing_args.pop()!r}")
elif missing_args:
display = ", ".join(repr(a) for a in sorted(missing_args))
raise AirflowException(f"missing keyword arguments {display}")
if merged_params:
kwargs["params"] = merged_params
hook = getattr(self, "_hook_apply_defaults", None)
if hook:
args, kwargs = hook(**kwargs, default_args=default_args)
default_args = kwargs.pop("default_args", {})
if not hasattr(self, "_BaseOperator__init_kwargs"):
self._BaseOperator__init_kwargs = {}
self._BaseOperator__from_mapped = instantiated_from_mapped
result = func(self, **kwargs, default_args=default_args)
            # Store the args passed to init -- we need them to support task.map serialization!
self._BaseOperator__init_kwargs.update(kwargs) # type: ignore
# Set upstream task defined by XComArgs passed to template fields of the operator.
# BUT: only do this _ONCE_, not once for each class in the hierarchy
if not instantiated_from_mapped and func == self.__init__.__wrapped__: # type: ignore[misc]
self.set_xcomargs_dependencies()
# Mark instance as instantiated.
self._BaseOperator__instantiated = True
return result
apply_defaults.__non_optional_args = non_optional_args # type: ignore
apply_defaults.__param_names = set(non_variadic_params) # type: ignore
return cast(T, apply_defaults)
def __new__(cls, name, bases, namespace, **kwargs):
new_cls = super().__new__(cls, name, bases, namespace, **kwargs)
with contextlib.suppress(KeyError):
# Update the partial descriptor with the class method, so it calls the actual function
# (but let subclasses override it if they need to)
partial_desc = vars(new_cls)["partial"]
if isinstance(partial_desc, _PartialDescriptor):
partial_desc.class_method = classmethod(partial)
new_cls.__init__ = cls._apply_defaults(new_cls.__init__)
return new_cls
@functools.total_ordering
class BaseOperator(AbstractOperator, metaclass=BaseOperatorMeta):
"""
Abstract base class for all operators. Since operators create objects that
become nodes in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:param owner: the owner of the task. Using a meaningful description
(e.g. user/person/team/role name) to clarify ownership is recommended.
:param email: the 'to' email address(es) used in email alerts. This can be a
single email or multiple ones. Multiple addresses can be specified as a
comma or semicolon separated string or by passing a list of strings.
:param email_on_retry: Indicates whether email alerts should be sent when a
task is retried
:param email_on_failure: Indicates whether email alerts should be sent when
a task failed
:param retries: the number of retries that should be performed before
failing the task
:param retry_delay: delay between retries, can be set as ``timedelta`` or
``float`` seconds, which will be converted into ``timedelta``,
the default is ``timedelta(seconds=300)``.
:param retry_exponential_backoff: allow progressively longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:param max_retry_delay: maximum delay interval between retries, can be set as
``timedelta`` or ``float`` seconds, which will be converted into ``timedelta``.
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their execution_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:param end_date: if specified, the scheduler won't go beyond this date
:param depends_on_past: when set to true, task instances will run
sequentially and only if the previous instance has succeeded or has been skipped.
The task instance for the start_date is allowed to run.
:param wait_for_past_depends_before_skipping: when set to true, if the task instance
        should be marked as skipped, and depends_on_past is true, the ti will stay in None state
        waiting for the task of the previous run
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully or be skipped before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used. Also note that
only tasks *immediately* downstream of the previous task instance are waited
for; the statuses of any tasks further downstream are ignored.
:param dag: a reference to the dag the task is attached to (if any)
    :param priority_weight: priority weight of this task against other tasks.
This allows the executor to trigger higher priority tasks before
others when things get backed up. Set priority_weight as a higher
number for more important tasks.
:param weight_rule: weighting method used for the effective total
priority weight of the task. Options are:
``{ downstream | upstream | absolute }`` default is ``downstream``
When set to ``downstream`` the effective weight of the task is the
aggregate sum of all downstream descendants. As a result, upstream
tasks will have higher weight and will be scheduled more aggressively
when using positive weight values. This is useful when you have
multiple dag run instances and desire to have all upstream tasks to
complete for all runs before each dag can continue processing
downstream tasks. When set to ``upstream`` the effective weight is the
aggregate sum of all upstream ancestors. This is the opposite where
downstream tasks have higher weight and will be scheduled more
aggressively when using positive weight values. This is useful when you
have multiple dag run instances and prefer to have each dag complete
before starting upstream tasks of other dags. When set to
``absolute``, the effective weight is the exact ``priority_weight``
specified without additional weighting. You may want to do this when
you know exactly what priority weight each task should have.
        Additionally, when set to ``absolute``, there is a bonus effect of
        significantly speeding up the task creation process for very large
        DAGs. Options can be set as string or using the constants defined in
the static class ``airflow.utils.WeightRule``
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:param pool_slots: the number of pool slots this task should use (>= 1)
Values less than 1 are not allowed.
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
        The scheduler pays special attention to jobs with an SLA and
        sends alert emails for SLA misses. SLA misses are also recorded
        in the database for future reference. All tasks that share the
        same SLA time get bundled in a single email, sent soon after that
        time. SLA notifications are sent once and only once for each task
        instance.
    :param execution_timeout: max time allowed for the execution of
        this task instance; if it runs longer, a timeout exception is raised
        and the task fails.
    :param on_failure_callback: a function or list of functions to be called when a task instance
        of this task fails. A context dictionary is passed as a single
        parameter to this function. The context contains references to
        objects related to the task instance and is documented under the
        macros section of the API.
:param on_execute_callback: much like the ``on_failure_callback`` except
that it is executed right before the task is executed.
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:param pre_execute: a function to be called immediately before task
execution, receiving a context dictionary; raising an exception will
prevent the task from being executed.
|experimental|
:param post_execute: a function to be called immediately after task
execution, receiving a context dictionary and task result; raising an
exception will prevent the task from succeeding.
|experimental|
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | all_skipped | one_success | one_done |
one_failed | none_failed | none_failed_min_one_success | none_skipped | always}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:param run_as_user: unix username to impersonate while running the task
:param max_active_tis_per_dag: When set, a task will be able to limit the concurrent
runs across execution_dates.
:param max_active_tis_per_dagrun: When set, a task will be able to limit the concurrent
task instances per DAG run.
:param executor_config: Additional task-level configuration parameters that are
interpreted by a specific executor. Parameters are namespaced by the name of
executor.
**Example**: to run this task in a specific docker container through
the KubernetesExecutor ::
MyOperator(...,
executor_config={
"KubernetesExecutor":
{"image": "myCustomDockerImage"}
}
)
:param do_xcom_push: if True, an XCom is pushed containing the Operator's
result
:param task_group: The TaskGroup to which the task should belong. This is typically provided when not
using a TaskGroup as a context manager.
:param doc: Add documentation or notes to your Task objects that is visible in
Task Instance details View in the Webserver
:param doc_md: Add documentation (in Markdown format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_rst: Add documentation (in RST format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_json: Add documentation (in JSON format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_yaml: Add documentation (in YAML format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
"""
# Implementing Operator.
template_fields: Sequence[str] = ()
template_ext: Sequence[str] = ()
template_fields_renderers: dict[str, str] = {}
# Defines the color in the UI
ui_color: str = "#fff"
ui_fgcolor: str = "#000"
pool: str = ""
# base list which includes all the attrs that don't need deep copy.
_base_operator_shallow_copy_attrs: tuple[str, ...] = (
"user_defined_macros",
"user_defined_filters",
"params",
"_log",
)
# each operator should override this class attr for shallow copy attrs.
shallow_copy_attrs: Sequence[str] = ()
# Defines the operator level extra links
operator_extra_links: Collection[BaseOperatorLink] = ()
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: frozenset[str] | None = None
partial: Callable[..., OperatorPartial] = _PartialDescriptor() # type: ignore
_comps = {
"task_id",
"dag_id",
"owner",
"email",
"email_on_retry",
"retry_delay",
"retry_exponential_backoff",
"max_retry_delay",
"start_date",
"end_date",
"depends_on_past",
"wait_for_downstream",
"priority_weight",
"sla",
"execution_timeout",
"on_execute_callback",
"on_failure_callback",
"on_success_callback",
"on_retry_callback",
"do_xcom_push",
}
# Defines if the operator supports lineage without manual definitions
supports_lineage = False
# If True then the class constructor was called
__instantiated = False
    # List of args as passed to `__init__()`, after being updated by apply_defaults(). Used to "recreate" the task
    # when mapping.
__init_kwargs: dict[str, Any]
# Set to True before calling execute method
_lock_for_execution = False
_dag: DAG | None = None
task_group: TaskGroup | None = None
# subdag parameter is only set for SubDagOperator.
# Setting it to None by default as other Operators do not have that field
subdag: DAG | None = None
start_date: pendulum.DateTime | None = None
end_date: pendulum.DateTime | None = None
# Set to True for an operator instantiated by a mapped operator.
__from_mapped = False
def __init__(
self,
task_id: str,
owner: str = DEFAULT_OWNER,
email: str | Iterable[str] | None = None,
email_on_retry: bool = conf.getboolean("email", "default_email_on_retry", fallback=True),
email_on_failure: bool = conf.getboolean("email", "default_email_on_failure", fallback=True),
retries: int | None = DEFAULT_RETRIES,
retry_delay: timedelta | float = DEFAULT_RETRY_DELAY,
retry_exponential_backoff: bool = False,
max_retry_delay: timedelta | float | None = None,
start_date: datetime | None = None,
end_date: datetime | None = None,
depends_on_past: bool = False,
ignore_first_depends_on_past: bool = DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST,
wait_for_past_depends_before_skipping: bool = DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING,
wait_for_downstream: bool = False,
dag: DAG | None = None,
params: collections.abc.MutableMapping | None = None,
default_args: dict | None = None,
priority_weight: int = DEFAULT_PRIORITY_WEIGHT,
weight_rule: str = DEFAULT_WEIGHT_RULE,
queue: str = DEFAULT_QUEUE,
pool: str | None = None,
pool_slots: int = DEFAULT_POOL_SLOTS,
sla: timedelta | None = None,
execution_timeout: timedelta | None = DEFAULT_TASK_EXECUTION_TIMEOUT,
on_execute_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
on_failure_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
on_success_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
on_retry_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
pre_execute: TaskPreExecuteHook | None = None,
post_execute: TaskPostExecuteHook | None = None,
trigger_rule: str = DEFAULT_TRIGGER_RULE,
resources: dict[str, Any] | None = None,
run_as_user: str | None = None,
task_concurrency: int | None = None,
max_active_tis_per_dag: int | None = None,
max_active_tis_per_dagrun: int | None = None,
executor_config: dict | None = None,
do_xcom_push: bool = True,
inlets: Any | None = None,
outlets: Any | None = None,
task_group: TaskGroup | None = None,
doc: str | None = None,
doc_md: str | None = None,
doc_json: str | None = None,
doc_yaml: str | None = None,
doc_rst: str | None = None,
**kwargs,
):
from airflow.models.dag import DagContext
from airflow.utils.task_group import TaskGroupContext
self.__init_kwargs = {}
super().__init__()
kwargs.pop("_airflow_mapped_validation_only", None)
if kwargs:
if not conf.getboolean("operators", "ALLOW_ILLEGAL_ARGUMENTS"):
raise AirflowException(
f"Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). "
f"Invalid arguments were:\n**kwargs: {kwargs}",
)
warnings.warn(
f"Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). "
"Support for passing such arguments will be dropped in future. "
f"Invalid arguments were:\n**kwargs: {kwargs}",
category=RemovedInAirflow3Warning,
stacklevel=3,
)
validate_key(task_id)
dag = dag or DagContext.get_current_dag()
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
DagInvalidTriggerRule.check(dag, trigger_rule)
self.task_id = task_group.child_id(task_id) if task_group else task_id
if not self.__from_mapped and task_group:
task_group.add(self)
self.owner = owner
self.email = email
self.email_on_retry = email_on_retry
self.email_on_failure = email_on_failure
if execution_timeout is not None and not isinstance(execution_timeout, timedelta):
raise ValueError(
f"execution_timeout must be timedelta object but passed as type: {type(execution_timeout)}"
)
self.execution_timeout = execution_timeout
self.on_execute_callback = on_execute_callback
self.on_failure_callback = on_failure_callback
self.on_success_callback = on_success_callback
self.on_retry_callback = on_retry_callback
self._pre_execute_hook = pre_execute
self._post_execute_hook = post_execute
if start_date and not isinstance(start_date, datetime):
self.log.warning("start_date for %s isn't datetime.datetime", self)
elif start_date:
self.start_date = timezone.convert_to_utc(start_date)
if end_date:
self.end_date = timezone.convert_to_utc(end_date)
self.executor_config = executor_config or {}
self.run_as_user = run_as_user
self.retries = parse_retries(retries)
self.queue = queue
self.pool = Pool.DEFAULT_POOL_NAME if pool is None else pool
self.pool_slots = pool_slots
if self.pool_slots < 1:
dag_str = f" in dag {dag.dag_id}" if dag else ""
raise ValueError(f"pool slots for {self.task_id}{dag_str} cannot be less than 1")
self.sla = sla
if trigger_rule == "dummy":
warnings.warn(
"dummy Trigger Rule is deprecated. Please use `TriggerRule.ALWAYS`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
trigger_rule = TriggerRule.ALWAYS
if trigger_rule == "none_failed_or_skipped":
warnings.warn(
"none_failed_or_skipped Trigger Rule is deprecated. "
"Please use `none_failed_min_one_success`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
trigger_rule = TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS
if not TriggerRule.is_valid(trigger_rule):
raise AirflowException(
f"The trigger_rule must be one of {TriggerRule.all_triggers()},"
f"'{dag.dag_id if dag else ''}.{task_id}'; received '{trigger_rule}'."
)
self.trigger_rule: TriggerRule = TriggerRule(trigger_rule)
self.depends_on_past: bool = depends_on_past
self.ignore_first_depends_on_past: bool = ignore_first_depends_on_past
self.wait_for_past_depends_before_skipping: bool = wait_for_past_depends_before_skipping
self.wait_for_downstream: bool = wait_for_downstream
if wait_for_downstream:
self.depends_on_past = True
self.retry_delay = coerce_timedelta(retry_delay, key="retry_delay")
self.retry_exponential_backoff = retry_exponential_backoff
self.max_retry_delay = (
max_retry_delay
if max_retry_delay is None
else coerce_timedelta(max_retry_delay, key="max_retry_delay")
)
# At execution_time this becomes a normal dict
self.params: ParamsDict | dict = ParamsDict(params)
if priority_weight is not None and not isinstance(priority_weight, int):
raise AirflowException(
f"`priority_weight` for task '{self.task_id}' only accepts integers, "
f"received '{type(priority_weight)}'."
)
self.priority_weight = priority_weight
if not WeightRule.is_valid(weight_rule):
raise AirflowException(
f"The weight_rule must be one of "
f"{WeightRule.all_weight_rules},'{dag.dag_id if dag else ''}.{task_id}'; "
f"received '{weight_rule}'."
)
self.weight_rule = weight_rule
self.resources = coerce_resources(resources)
if task_concurrency and not max_active_tis_per_dag:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The 'task_concurrency' parameter is deprecated. Please use 'max_active_tis_per_dag'.",
RemovedInAirflow3Warning,
stacklevel=2,
)
max_active_tis_per_dag = task_concurrency
self.max_active_tis_per_dag: int | None = max_active_tis_per_dag
self.max_active_tis_per_dagrun: int | None = max_active_tis_per_dagrun
self.do_xcom_push = do_xcom_push
self.doc_md = doc_md
self.doc_json = doc_json
self.doc_yaml = doc_yaml
self.doc_rst = doc_rst
self.doc = doc
self.upstream_task_ids: set[str] = set()
self.downstream_task_ids: set[str] = set()
if dag:
self.dag = dag
self._log = logging.getLogger("airflow.task.operators")
# Lineage
self.inlets: list = []
self.outlets: list = []
if inlets:
self.inlets = (
inlets
if isinstance(inlets, list)
else [
inlets,
]
)
if outlets:
self.outlets = (
outlets
if isinstance(outlets, list)
else [
outlets,
]
)
if isinstance(self.template_fields, str):
warnings.warn(
f"The `template_fields` value for {self.task_type} is a string "
"but should be a list or tuple of string. Wrapping it in a list for execution. "
f"Please update {self.task_type} accordingly.",
UserWarning,
stacklevel=2,
)
self.template_fields = [self.template_fields]
if SetupTeardownContext.active:
SetupTeardownContext.update_context_map(self)
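    # Illustrative sketch (not part of the original source): how the constructor
    # arguments documented above are typically supplied when defining a task.
    # The operator, command, pool name and image below are hypothetical.
    #
    #   from datetime import timedelta
    #   from airflow.operators.bash import BashOperator
    #
    #   task = BashOperator(
    #       task_id="extract",
    #       bash_command="echo extracting",
    #       retries=3,
    #       retry_delay=timedelta(minutes=5),
    #       retry_exponential_backoff=True,
    #       pool="etl_pool",
    #       executor_config={"KubernetesExecutor": {"image": "myCustomDockerImage"}},
    #       dag=dag,  # assumes a `dag` object is already defined in scope
    #   )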
def __enter__(self):
if not self.is_setup and not self.is_teardown:
raise AirflowException("Only setup/teardown tasks can be used as context managers.")
SetupTeardownContext.push_setup_teardown_task(self)
return SetupTeardownContext
def __exit__(self, exc_type, exc_val, exc_tb):
SetupTeardownContext.set_work_task_roots_and_leaves()
def __eq__(self, other):
if type(self) is type(other):
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
hash_components = [type(self)]
for component in self._comps:
val = getattr(self, component, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# including lineage information
def __or__(self, other):
"""
Called for [This Operator] | [Operator], The inlets of other
will be set to pick up the outlets from this operator. Other will
be set as a downstream task of this operator.
"""
if isinstance(other, BaseOperator):
if not self.outlets and not self.supports_lineage:
raise ValueError("No outlets defined for this operator")
other.add_inlets([self.task_id])
self.set_downstream(other)
else:
raise TypeError(f"Right hand side ({other}) is not an Operator")
return self
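    # Illustrative sketch (not part of the original source): the `|` operator both
    # wires lineage (other's inlets pick up this operator's outlets) and sets a
    # downstream dependency. `extract` and `load` are hypothetical operators, and
    # `extract` is assumed to have outlets defined.
    #
    #   extract | load
    #   # roughly equivalent to:
    #   #   load.add_inlets([extract.task_id])
    #   #   extract.set_downstream(load)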
# /Composing Operators ---------------------------------------------
def __gt__(self, other):
"""
Called for [Operator] > [Outlet], so that if other is an attr annotated object
it is set as an outlet of this Operator.
"""
if not isinstance(other, Iterable):
other = [other]
for obj in other:
if not attr.has(obj):
raise TypeError(f"Left hand side ({obj}) is not an outlet")
self.add_outlets(other)
return self
def __lt__(self, other):
"""
Called for [Inlet] > [Operator] or [Operator] < [Inlet], so that if other is
an attr annotated object it is set as an inlet to this operator.
"""
if not isinstance(other, Iterable):
other = [other]
for obj in other:
if not attr.has(obj):
raise TypeError(f"{obj} cannot be an inlet")
self.add_inlets(other)
return self
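    # Illustrative sketch (not part of the original source): `>` and `<` attach
    # attr-annotated lineage entities as outlets/inlets. `File` is assumed to be
    # importable from airflow.lineage.entities; `transform_task` is hypothetical.
    #
    #   from airflow.lineage.entities import File
    #
    #   raw = File(url="s3://bucket/raw.csv")
    #   clean = File(url="s3://bucket/clean.csv")
    #   raw > transform_task      # `raw` becomes an inlet of transform_task (__lt__)
    #   transform_task > clean    # `clean` becomes an outlet of transform_task (__gt__)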
def __setattr__(self, key, value):
super().__setattr__(key, value)
if self.__from_mapped or self._lock_for_execution:
return # Skip any custom behavior for validation and during execute.
if key in self.__init_kwargs:
self.__init_kwargs[key] = value
if self.__instantiated and key in self.template_fields:
# Resolve upstreams set by assigning an XComArg after initializing
# an operator, example:
# op = BashOperator()
# op.bash_command = "sleep 1"
self.set_xcomargs_dependencies()
def add_inlets(self, inlets: Iterable[Any]):
"""Sets inlets to this operator."""
self.inlets.extend(inlets)
def add_outlets(self, outlets: Iterable[Any]):
"""Defines the outlets of this operator."""
self.outlets.extend(outlets)
def get_inlet_defs(self):
"""Gets inlet definitions on this task.
:meta private:
"""
return self.inlets
def get_outlet_defs(self):
"""Gets outlet definitions on this task.
:meta private:
"""
return self.outlets
def get_dag(self) -> DAG | None:
return self._dag
@property # type: ignore[override]
def dag(self) -> DAG: # type: ignore[override]
"""Returns the Operator's DAG if set, otherwise raises an error."""
if self._dag:
return self._dag
else:
raise AirflowException(f"Operator {self} has not been assigned to a DAG yet")
@dag.setter
def dag(self, dag: DAG | None):
"""
Operators can be assigned to one DAG, one time. Repeat assignments to
that same DAG are ok.
"""
from airflow.models.dag import DAG
if dag is None:
self._dag = None
return
if not isinstance(dag, DAG):
raise TypeError(f"Expected DAG; received {dag.__class__.__name__}")
elif self.has_dag() and self.dag is not dag:
raise AirflowException(f"The DAG assigned to {self} can not be changed.")
if self.__from_mapped:
pass # Don't add to DAG -- the mapped task takes the place.
elif self.task_id not in dag.task_dict:
dag.add_task(self)
elif self.task_id in dag.task_dict and dag.task_dict[self.task_id] is not self:
dag.add_task(self)
self._dag = dag
def has_dag(self):
"""Returns True if the Operator has been assigned to a DAG."""
return self._dag is not None
deps: frozenset[BaseTIDep] = frozenset(
{
NotInRetryPeriodDep(),
PrevDagrunDep(),
TriggerRuleDep(),
NotPreviouslySkippedDep(),
}
)
"""
Returns the set of dependencies for the operator. These differ from execution
context dependencies in that they are specific to tasks and can be
extended/overridden by subclasses.
"""
def prepare_for_execution(self) -> BaseOperator:
"""
        Lock the task for execution to disable custom actions in ``__setattr__`` and
        return a copy of the task.
"""
other = copy.copy(self)
other._lock_for_execution = True
return other
def set_xcomargs_dependencies(self) -> None:
"""
        Resolves upstream dependencies of a task. In this way, passing an ``XComArg``
        as a value for a template field will result in creating an upstream relation
        between two tasks.
**Example**: ::
with DAG(...):
generate_content = GenerateContentOperator(task_id="generate_content")
send_email = EmailOperator(..., html_content=generate_content.output)
# This is equivalent to
with DAG(...):
generate_content = GenerateContentOperator(task_id="generate_content")
send_email = EmailOperator(
..., html_content="{{ task_instance.xcom_pull('generate_content') }}"
)
generate_content >> send_email
"""
from airflow.models.xcom_arg import XComArg
for field in self.template_fields:
if hasattr(self, field):
arg = getattr(self, field)
XComArg.apply_upstream_relationship(self, arg)
@prepare_lineage
def pre_execute(self, context: Any):
"""This hook is triggered right before self.execute() is called."""
if self._pre_execute_hook is not None:
self._pre_execute_hook(context)
def execute(self, context: Context) -> Any:
"""
This is the main method to derive when creating an operator.
Context is the same dictionary used as when rendering jinja templates.
Refer to get_template_context for more context.
"""
raise NotImplementedError()
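    # Minimal sketch (not part of the original source) of deriving an operator by
    # overriding execute(); the class name and message are hypothetical.
    #
    #   class HelloOperator(BaseOperator):
    #       def __init__(self, name: str, **kwargs):
    #           super().__init__(**kwargs)
    #           self.name = name
    #
    #       def execute(self, context):
    #           message = f"Hello {self.name}"
    #           self.log.info(message)
    #           return message  # pushed as an XCom since do_xcom_push defaults to True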
@apply_lineage
def post_execute(self, context: Any, result: Any = None):
"""
This hook is triggered right after self.execute() is called.
It is passed the execution context and any results returned by the
operator.
"""
if self._post_execute_hook is not None:
self._post_execute_hook(context, result)
def on_kill(self) -> None:
"""
Override this method to clean up subprocesses when a task instance
gets killed. Any use of the threading, subprocess or multiprocessing
module within an operator needs to be cleaned up, or it will leave
ghost processes behind.
"""
def __deepcopy__(self, memo):
# Hack sorting double chained task lists by task_id to avoid hitting
# max_depth on deepcopy operations.
sys.setrecursionlimit(5000) # TODO fix this in a better way
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs
for k, v in self.__dict__.items():
if k == "_BaseOperator__instantiated":
# Don't set this until the _end_, as it changes behaviour of __setattr__
continue
if k not in shallow_copy:
setattr(result, k, copy.deepcopy(v, memo))
else:
setattr(result, k, copy.copy(v))
result.__instantiated = self.__instantiated
return result
def __getstate__(self):
state = dict(self.__dict__)
del state["_log"]
return state
def __setstate__(self, state):
self.__dict__ = state
self._log = logging.getLogger("airflow.task.operators")
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
"""Template all attributes listed in *self.template_fields*.
This mutates the attributes in-place and is irreversible.
:param context: Context dict with values to apply on content.
:param jinja_env: Jinja's environment to use for rendering.
"""
if not jinja_env:
jinja_env = self.get_template_env()
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
@provide_session
def clear(
self,
start_date: datetime | None = None,
end_date: datetime | None = None,
upstream: bool = False,
downstream: bool = False,
session: Session = NEW_SESSION,
):
"""
Clears the state of task instances associated with the task, following
the parameters specified.
"""
qry = select(TaskInstance).where(TaskInstance.dag_id == self.dag_id)
if start_date:
qry = qry.where(TaskInstance.execution_date >= start_date)
if end_date:
qry = qry.where(TaskInstance.execution_date <= end_date)
tasks = [self.task_id]
if upstream:
tasks += [t.task_id for t in self.get_flat_relatives(upstream=True)]
if downstream:
tasks += [t.task_id for t in self.get_flat_relatives(upstream=False)]
qry = qry.where(TaskInstance.task_id.in_(tasks))
results = session.scalars(qry).all()
count = len(results)
clear_task_instances(results, session, dag=self.dag)
session.commit()
return count
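    # Illustrative usage sketch (not part of the original source): clearing this
    # task's instances (and everything downstream) for a date window. `my_task`
    # and the dates are hypothetical.
    #
    #   import pendulum
    #
    #   cleared = my_task.clear(
    #       start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    #       end_date=pendulum.datetime(2023, 1, 31, tz="UTC"),
    #       downstream=True,
    #   )
    #   print(f"Cleared {cleared} task instances")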
@provide_session
def get_task_instances(
self,
start_date: datetime | None = None,
end_date: datetime | None = None,
session: Session = NEW_SESSION,
) -> list[TaskInstance]:
"""Get task instances related to this task for a specific date range."""
from airflow.models import DagRun
end_date = end_date or timezone.utcnow()
return session.scalars(
select(TaskInstance)
.join(TaskInstance.dag_run)
.where(TaskInstance.dag_id == self.dag_id)
.where(TaskInstance.task_id == self.task_id)
.where(DagRun.execution_date >= start_date)
.where(DagRun.execution_date <= end_date)
.order_by(DagRun.execution_date)
).all()
@provide_session
def run(
self,
start_date: datetime | None = None,
end_date: datetime | None = None,
ignore_first_depends_on_past: bool = True,
wait_for_past_depends_before_skipping: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
session: Session = NEW_SESSION,
) -> None:
"""Run a set of task instances for a date range."""
from airflow.models import DagRun
from airflow.utils.types import DagRunType
        # Assertions for typing -- we need a dag for this function, and when we have a DAG we are
        # _guaranteed_ to have start_date (else we couldn't have been added to a DAG)
if TYPE_CHECKING:
assert self.start_date
start_date = pendulum.instance(start_date or self.start_date)
end_date = pendulum.instance(end_date or self.end_date or timezone.utcnow())
for info in self.dag.iter_dagrun_infos_between(start_date, end_date, align=False):
ignore_depends_on_past = info.logical_date == start_date and ignore_first_depends_on_past
try:
dag_run = session.scalars(
select(DagRun).where(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == info.logical_date,
)
).one()
ti = TaskInstance(self, run_id=dag_run.run_id)
except NoResultFound:
# This is _mostly_ only used in tests
dr = DagRun(
dag_id=self.dag_id,
run_id=DagRun.generate_run_id(DagRunType.MANUAL, info.logical_date),
run_type=DagRunType.MANUAL,
execution_date=info.logical_date,
data_interval=info.data_interval,
)
ti = TaskInstance(self, run_id=dr.run_id)
ti.dag_run = dr
session.add(dr)
session.flush()
ti.run(
mark_success=mark_success,
ignore_depends_on_past=ignore_depends_on_past,
wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping,
ignore_ti_state=ignore_ti_state,
test_mode=test_mode,
session=session,
)
def dry_run(self) -> None:
"""Performs dry run for the operator - just render template fields."""
self.log.info("Dry run")
for field in self.template_fields:
try:
content = getattr(self, field)
except AttributeError:
raise AttributeError(
f"{field!r} is configured as a template field "
f"but {self.task_type} does not have this attribute."
)
if content and isinstance(content, str):
self.log.info("Rendering template for %s", field)
self.log.info(content)
def get_direct_relatives(self, upstream: bool = False) -> Iterable[Operator]:
"""
Get list of the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.task_type}): {self.task_id}>".format(self=self)
@property
def operator_class(self) -> type[BaseOperator]: # type: ignore[override]
return self.__class__
@property
def task_type(self) -> str:
"""@property: type of the task."""
return self.__class__.__name__
@property
def operator_name(self) -> str:
"""@property: use a more friendly display name for the operator, if set."""
try:
return self.custom_operator_name # type: ignore
except AttributeError:
return self.task_type
@property
def roots(self) -> list[BaseOperator]:
"""Required by DAGNode."""
return [self]
@property
def leaves(self) -> list[BaseOperator]:
"""Required by DAGNode."""
return [self]
@property
def output(self) -> XComArg:
"""Returns reference to XCom pushed by current operator."""
from airflow.models.xcom_arg import XComArg
return XComArg(operator=self)
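    # Illustrative sketch (not part of the original source): `.output` is an
    # XComArg referencing the value this task pushes; passing it to a template
    # field of another operator also creates the upstream relationship. The
    # operator names mirror the example in set_xcomargs_dependencies above.
    #
    #   generate_content = GenerateContentOperator(task_id="generate_content")
    #   send_email = EmailOperator(task_id="send_email", html_content=generate_content.output)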
@staticmethod
def xcom_push(
context: Any,
key: str,
value: Any,
execution_date: datetime | None = None,
) -> None:
"""
Make an XCom available for tasks to pull.
:param context: Execution Context Dictionary
:param key: A key for the XCom
:param value: A value for the XCom. The value is pickled and stored
in the database.
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
"""
context["ti"].xcom_push(key=key, value=value, execution_date=execution_date)
@staticmethod
@provide_session
def xcom_pull(
context: Any,
task_ids: str | list[str] | None = None,
dag_id: str | None = None,
key: str = XCOM_RETURN_KEY,
include_prior_dates: bool | None = None,
session: Session = NEW_SESSION,
) -> Any:
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param context: Execution Context Dictionary
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
"""
return context["ti"].xcom_pull(
key=key,
task_ids=task_ids,
dag_id=dag_id,
include_prior_dates=include_prior_dates,
session=session,
)
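    # Illustrative usage sketch (not part of the original source): pulling the
    # return value of an upstream task from inside execute(). The upstream
    # task_id is hypothetical.
    #
    #   def execute(self, context):
    #       row_count = self.xcom_pull(context, task_ids="count_rows")
    #       self.log.info("Upstream counted %s rows", row_count)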
@classmethod
def get_serialized_fields(cls):
"""Stringified DAGs and operators contain exactly these fields."""
if not cls.__serialized_fields:
from airflow.models.dag import DagContext
            # make sure the following dummy task is not added to the current active
            # dag in context, otherwise it will result in a
            # `RuntimeError: dictionary changed size during iteration`
            # exception in the SerializedDAG.serialize_dag() call.
DagContext.push_context_managed_dag(None)
cls.__serialized_fields = frozenset(
vars(BaseOperator(task_id="test")).keys()
- {
"upstream_task_ids",
"default_args",
"dag",
"_dag",
"label",
"_BaseOperator__instantiated",
"_BaseOperator__init_kwargs",
"_BaseOperator__from_mapped",
}
| { # Class level defaults need to be added to this list
"start_date",
"end_date",
"_task_type",
"_operator_name",
"subdag",
"ui_color",
"ui_fgcolor",
"template_ext",
"template_fields",
"template_fields_renderers",
"params",
"is_setup",
"is_teardown",
"on_failure_fail_dagrun",
}
)
DagContext.pop_context_managed_dag()
return cls.__serialized_fields
def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
"""Required by DAGNode."""
return DagAttributeTypes.OP, self.task_id
@property
def inherits_from_empty_operator(self):
"""Used to determine if an Operator is inherited from EmptyOperator."""
        # This looks like `isinstance(self, EmptyOperator)` would work, but this also
# needs to cope when `self` is a Serialized instance of a EmptyOperator or one
# of its subclasses (which don't inherit from anything but BaseOperator).
return getattr(self, "_is_empty", False)
def defer(
self,
*,
trigger: BaseTrigger,
method_name: str,
kwargs: dict[str, Any] | None = None,
timeout: timedelta | None = None,
):
"""
Marks this Operator as being "deferred" - that is, suspending its
execution until the provided trigger fires an event.
This is achieved by raising a special exception (TaskDeferred)
which is caught in the main _execute_task wrapper.
"""
raise TaskDeferred(trigger=trigger, method_name=method_name, kwargs=kwargs, timeout=timeout)
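    # Minimal deferral sketch (not part of the original source): suspend the task
    # until a trigger fires, then resume in a named method. TimeDeltaTrigger is
    # assumed to be importable from airflow.triggers.temporal in this version.
    #
    #   from datetime import timedelta
    #   from airflow.triggers.temporal import TimeDeltaTrigger
    #
    #   class WaitOneHourOperator(BaseOperator):
    #       def execute(self, context):
    #           self.defer(
    #               trigger=TimeDeltaTrigger(timedelta(hours=1)),
    #               method_name="execute_complete",
    #           )
    #
    #       def execute_complete(self, context, event=None):
    #           return "done"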
def unmap(self, resolve: None | dict[str, Any] | tuple[Context, Session]) -> BaseOperator:
"""Get the "normal" operator from the current operator.
Since a BaseOperator is not mapped to begin with, this simply returns
the original operator.
:meta private:
"""
return self
# TODO: Deprecate for Airflow 3.0
Chainable = Union[DependencyMixin, Sequence[DependencyMixin]]
def chain(*tasks: DependencyMixin | Sequence[DependencyMixin]) -> None:
r"""
Given a number of tasks, builds a dependency chain.
This function accepts values of BaseOperator (aka tasks), EdgeModifiers (aka Labels), XComArg, TaskGroups,
or lists containing any mix of these types (or a mix in the same list). If you want to chain between two
lists you must ensure they have the same length.
Using classic operators/sensors:
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
/ -> t2 -> t4 \
t1 -> t6
\ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
Using task-decorated functions aka XComArgs:
.. code-block:: python
chain(x1(), [x2(), x3()], [x4(), x5()], x6())
is equivalent to::
/ -> x2 -> x4 \
x1 -> x6
\ -> x3 -> x5 /
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
x4 = x4()
x5 = x5()
x6 = x6()
x1.set_downstream(x2)
x1.set_downstream(x3)
x2.set_downstream(x4)
x3.set_downstream(x5)
x4.set_downstream(x6)
x5.set_downstream(x6)
Using TaskGroups:
.. code-block:: python
chain(t1, task_group1, task_group2, t2)
t1.set_downstream(task_group1)
task_group1.set_downstream(task_group2)
task_group2.set_downstream(t2)
It is also possible to mix between classic operator/sensor, EdgeModifiers, XComArg, and TaskGroups:
.. code-block:: python
chain(t1, [Label("branch one"), Label("branch two")], [x1(), x2()], task_group1, x3())
is equivalent to::
/ "branch one" -> x1 \
t1 -> task_group1 -> x3
\ "branch two" -> x2 /
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
label1 = Label("branch one")
label2 = Label("branch two")
t1.set_downstream(label1)
label1.set_downstream(x1)
t2.set_downstream(label2)
label2.set_downstream(x2)
x1.set_downstream(task_group1)
x2.set_downstream(task_group1)
task_group1.set_downstream(x3)
# or
x1 = x1()
x2 = x2()
x3 = x3()
t1.set_downstream(x1, edge_modifier=Label("branch one"))
t1.set_downstream(x2, edge_modifier=Label("branch two"))
x1.set_downstream(task_group1)
x2.set_downstream(task_group1)
task_group1.set_downstream(x3)
:param tasks: Individual and/or list of tasks, EdgeModifiers, XComArgs, or TaskGroups to set dependencies
"""
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, DependencyMixin):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, DependencyMixin):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(f"Chain not supported between instances of {type(up_task)} and {type(down_task)}")
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
f"Chain not supported for different length Iterable. "
f"Got {len(up_task_list)} and {len(down_task_list)}."
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
def cross_downstream(
from_tasks: Sequence[DependencyMixin],
to_tasks: DependencyMixin | Sequence[DependencyMixin],
):
r"""
Set downstream dependencies for all tasks in from_tasks to all tasks in to_tasks.
Using classic operators/sensors:
.. code-block:: python
cross_downstream(from_tasks=[t1, t2, t3], to_tasks=[t4, t5, t6])
is equivalent to::
t1 ---> t4
\ /
t2 -X -> t5
/ \
t3 ---> t6
.. code-block:: python
t1.set_downstream(t4)
t1.set_downstream(t5)
t1.set_downstream(t6)
t2.set_downstream(t4)
t2.set_downstream(t5)
t2.set_downstream(t6)
t3.set_downstream(t4)
t3.set_downstream(t5)
t3.set_downstream(t6)
Using task-decorated functions aka XComArgs:
.. code-block:: python
cross_downstream(from_tasks=[x1(), x2(), x3()], to_tasks=[x4(), x5(), x6()])
is equivalent to::
x1 ---> x4
\ /
x2 -X -> x5
/ \
x3 ---> x6
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
x4 = x4()
x5 = x5()
x6 = x6()
x1.set_downstream(x4)
x1.set_downstream(x5)
x1.set_downstream(x6)
x2.set_downstream(x4)
x2.set_downstream(x5)
x2.set_downstream(x6)
x3.set_downstream(x4)
x3.set_downstream(x5)
x3.set_downstream(x6)
It is also possible to mix between classic operator/sensor and XComArg tasks:
.. code-block:: python
cross_downstream(from_tasks=[t1, x2(), t3], to_tasks=[x1(), t2, x3()])
is equivalent to::
t1 ---> x1
\ /
x2 -X -> t2
/ \
t3 ---> x3
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
t1.set_downstream(x1)
t1.set_downstream(t2)
t1.set_downstream(x3)
x2.set_downstream(x1)
x2.set_downstream(t2)
x2.set_downstream(x3)
t3.set_downstream(x1)
t3.set_downstream(t2)
t3.set_downstream(x3)
:param from_tasks: List of tasks or XComArgs to start from.
:param to_tasks: List of tasks or XComArgs to set as downstream dependencies.
"""
for task in from_tasks:
task.set_downstream(to_tasks)
def chain_linear(*elements: DependencyMixin | Sequence[DependencyMixin]):
"""
Helper to simplify task dependency definition.
E.g.: suppose you want precedence like so::
╭─op2─╮ ╭─op4─╮
op1─┤ ├─├─op5─┤─op7
╰-op3─╯ ╰-op6─╯
Then you can accomplish like so::
chain_linear(
op1,
[op2, op3],
[op4, op5, op6],
op7
)
:param elements: a list of operators / lists of operators
"""
if not elements:
raise ValueError("No tasks provided; nothing to do.")
prev_elem = None
deps_set = False
for curr_elem in elements:
if isinstance(curr_elem, EdgeModifier):
raise ValueError("Labels are not supported by chain_linear")
if prev_elem is not None:
for task in prev_elem:
task >> curr_elem
if not deps_set:
deps_set = True
prev_elem = [curr_elem] if isinstance(curr_elem, DependencyMixin) else curr_elem
if not deps_set:
raise ValueError("No dependencies were set. Did you forget to expand with `*`?")
# pyupgrade assumes all type annotations can be lazily evaluated, but this is
# not the case for attrs-decorated classes, since cattrs needs to evaluate the
# annotation expressions at runtime, and Python before 3.9.0 does not lazily
# evaluate those. Putting the expression in a top-level assignment statement
# communicates this runtime requirement to pyupgrade.
BaseOperatorClassList = List[Type[BaseOperator]]
@attr.s(auto_attribs=True)
class BaseOperatorLink(metaclass=ABCMeta):
"""Abstract base class that defines how we get an operator link."""
operators: ClassVar[BaseOperatorClassList] = []
"""
    This property will be used by Airflow Plugins to find the Operators to which you want
    to assign this Operator Link
    :return: List of Operator classes used by the task for which you want to create an extra link
"""
@property
@abstractmethod
def name(self) -> str:
"""Name of the link. This will be the button name on the task UI."""
@abstractmethod
def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey) -> str:
"""Link to external system.
Note: The old signature of this function was ``(self, operator, dttm: datetime)``. That is still
supported at runtime but is deprecated.
:param operator: The Airflow operator object this link is associated to.
:param ti_key: TaskInstance ID to return link for.
:return: link to external system
"""
| 73,549 | 38.12234 | 110 |
py
|
airflow
|
airflow-main/airflow/models/dag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections
import collections.abc
import copy
import functools
import itertools
import logging
import os
import pathlib
import pickle
import sys
import traceback
import warnings
import weakref
from collections import deque
from datetime import datetime, timedelta
from inspect import signature
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Container,
Iterable,
Iterator,
List,
Pattern,
Sequence,
Union,
cast,
overload,
)
from urllib.parse import urlsplit
import jinja2
import pendulum
import re2
from dateutil.relativedelta import relativedelta
from pendulum.tz.timezone import Timezone
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
Index,
Integer,
String,
Text,
and_,
case,
func,
not_,
or_,
select,
update,
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import backref, joinedload, relationship
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import Select, expression
import airflow.templates
from airflow import settings, utils
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.configuration import conf as airflow_conf, secrets_backend_list
from airflow.exceptions import (
AirflowDagInconsistent,
AirflowException,
AirflowSkipException,
DagInvalidTriggerRule,
DuplicateTaskIdFound,
RemovedInAirflow3Warning,
TaskNotFound,
)
from airflow.jobs.job import run_job
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.base import Base, StringID
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagcode import DagCode
from airflow.models.dagpickle import DagPickle
from airflow.models.dagrun import RUN_ID_REGEX, DagRun
from airflow.models.operator import Operator
from airflow.models.param import DagParam, ParamsDict
from airflow.models.taskinstance import Context, TaskInstance, TaskInstanceKey, clear_task_instances
from airflow.secrets.local_filesystem import LocalFilesystemBackend
from airflow.security import permissions
from airflow.stats import Stats
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
from airflow.timetables.interval import CronDataIntervalTimetable, DeltaDataIntervalTimetable
from airflow.timetables.simple import (
ContinuousTimetable,
DatasetTriggeredTimetable,
NullTimetable,
OnceTimetable,
)
from airflow.typing_compat import Literal
from airflow.utils import timezone
from airflow.utils.dag_cycle_tester import check_cycle
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.decorators import fixup_decorator_warning_stack
from airflow.utils.helpers import at_most_one, exactly_one, validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import (
Interval,
UtcDateTime,
lock_rows,
skip_locked,
tuple_in_condition,
with_row_locks,
)
from airflow.utils.state import DagRunState, TaskInstanceState
from airflow.utils.types import NOTSET, ArgNotSet, DagRunType, EdgeInfoType
if TYPE_CHECKING:
from types import ModuleType
from airflow.datasets import Dataset
from airflow.decorators import TaskDecoratorCollection
from airflow.models.dagbag import DagBag
from airflow.models.slamiss import SlaMiss
from airflow.utils.task_group import TaskGroup
log = logging.getLogger(__name__)
DEFAULT_VIEW_PRESETS = ["grid", "graph", "duration", "gantt", "landing_times"]
ORIENTATION_PRESETS = ["LR", "TB", "RL", "BT"]
TAG_MAX_LEN = 100
DagStateChangeCallback = Callable[[Context], None]
ScheduleInterval = Union[None, str, timedelta, relativedelta]
# FIXME: Ideally this should be Union[Literal[NOTSET], ScheduleInterval],
# but Mypy cannot handle that right now. Track progress of PEP 661 for progress.
# See also: https://discuss.python.org/t/9126/7
ScheduleIntervalArg = Union[ArgNotSet, ScheduleInterval]
ScheduleArg = Union[ArgNotSet, ScheduleInterval, Timetable, Collection["Dataset"]]
SLAMissCallback = Callable[["DAG", str, str, List["SlaMiss"], List[TaskInstance]], None]
# Backward compatibility: If neither schedule_interval nor timetable is
# *provided by the user*, default to a one-day interval.
DEFAULT_SCHEDULE_INTERVAL = timedelta(days=1)
class InconsistentDataInterval(AirflowException):
"""Exception raised when a model populates data interval fields incorrectly.
The data interval fields should either both be None (for runs scheduled
prior to AIP-39), or both be datetime (for runs scheduled after AIP-39 is
implemented). This is raised if exactly one of the fields is None.
"""
_template = (
"Inconsistent {cls}: {start[0]}={start[1]!r}, {end[0]}={end[1]!r}, "
"they must be either both None or both datetime"
)
def __init__(self, instance: Any, start_field_name: str, end_field_name: str) -> None:
self._class_name = type(instance).__name__
self._start_field = (start_field_name, getattr(instance, start_field_name))
self._end_field = (end_field_name, getattr(instance, end_field_name))
def __str__(self) -> str:
return self._template.format(cls=self._class_name, start=self._start_field, end=self._end_field)
def _get_model_data_interval(
instance: Any,
start_field_name: str,
end_field_name: str,
) -> DataInterval | None:
start = timezone.coerce_datetime(getattr(instance, start_field_name))
end = timezone.coerce_datetime(getattr(instance, end_field_name))
if start is None:
if end is not None:
raise InconsistentDataInterval(instance, start_field_name, end_field_name)
return None
elif end is None:
raise InconsistentDataInterval(instance, start_field_name, end_field_name)
return DataInterval(start, end)
def create_timetable(interval: ScheduleIntervalArg, timezone: Timezone) -> Timetable:
"""Create a Timetable instance from a ``schedule_interval`` argument."""
if interval is NOTSET:
return DeltaDataIntervalTimetable(DEFAULT_SCHEDULE_INTERVAL)
if interval is None:
return NullTimetable()
if interval == "@once":
return OnceTimetable()
if interval == "@continuous":
return ContinuousTimetable()
if isinstance(interval, (timedelta, relativedelta)):
return DeltaDataIntervalTimetable(interval)
if isinstance(interval, str):
return CronDataIntervalTimetable(interval, timezone)
raise ValueError(f"{interval!r} is not a valid schedule_interval.")
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
"""
Returns the last dag run for a dag, None if there was none.
Last dag run can be any type of run e.g. scheduled or backfilled.
Overridden DagRuns are ignored.
"""
DR = DagRun
query = select(DR).where(DR.dag_id == dag_id)
if not include_externally_triggered:
query = query.where(DR.external_trigger == expression.false())
query = query.order_by(DR.execution_date.desc())
return session.scalar(query.limit(1))
def get_dataset_triggered_next_run_info(
dag_ids: list[str], *, session: Session
) -> dict[str, dict[str, int | str]]:
"""
    Given a list of dag_ids, get a string representing how close any dataset-triggered
    DAGs are to their next run, e.g. "1 of 2 datasets updated".
"""
from airflow.models.dataset import DagScheduleDatasetReference, DatasetDagRunQueue as DDRQ, DatasetModel
return {
x.dag_id: {
"uri": x.uri,
"ready": x.ready,
"total": x.total,
}
for x in session.execute(
select(
DagScheduleDatasetReference.dag_id,
# This is a dirty hack to workaround group by requiring an aggregate,
# since grouping by dataset is not what we want to do here...but it works
case((func.count() == 1, func.max(DatasetModel.uri)), else_="").label("uri"),
func.count().label("total"),
func.sum(case((DDRQ.target_dag_id.is_not(None), 1), else_=0)).label("ready"),
)
.join(
DDRQ,
and_(
DDRQ.dataset_id == DagScheduleDatasetReference.dataset_id,
DDRQ.target_dag_id == DagScheduleDatasetReference.dag_id,
),
isouter=True,
)
.join(DatasetModel, DatasetModel.id == DagScheduleDatasetReference.dataset_id)
.group_by(DagScheduleDatasetReference.dag_id)
.where(DagScheduleDatasetReference.dag_id.in_(dag_ids))
).all()
}
@functools.total_ordering
class DAG(LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start date and an end date
    (optional). For each schedule (say daily or hourly), the DAG needs to run
    each individual task as its dependencies are met. Certain tasks have
    the property of depending on their own past, meaning that they can't run
    until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
Note that if you plan to use time zones all the dates provided should be pendulum
dates. See :ref:`timezone_aware_dags`.
.. versionadded:: 2.4
The *schedule* argument to specify either time-based scheduling logic
(timetable), or dataset-driven triggers.
.. deprecated:: 2.4
The arguments *schedule_interval* and *timetable*. Their functionalities
are merged into the new *schedule* argument.
:param dag_id: The id of the DAG; must consist exclusively of alphanumeric
characters, dashes, dots and underscores (all ASCII)
:param description: The description for the DAG to e.g. be shown on the webserver
:param schedule: Defines the rules according to which DAG runs are scheduled. Can
accept cron string, timedelta object, Timetable, or list of Dataset objects.
See also :doc:`/howto/timetable`.
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:param end_date: A date beyond which your DAG won't run, leave to None
for open-ended scheduling
:param template_searchpath: This list of folders (non-relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:param template_undefined: Template undefined type.
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:param max_active_tasks: the number of task instances allowed to run
concurrently
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created.
:param sla_miss_callback: specify a function or list of functions to call when reporting SLA
timeouts. See :ref:`sla_miss_callback<concepts:sla_miss_callback>` for
more information about the function signature and parameters that are
passed to the callback.
:param default_view: Specify DAG default view (grid, graph, duration,
gantt, landing_times), default grid
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT), default LR
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:param on_failure_callback: A function or list of functions to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:param access_control: Specify optional DAG-level actions, e.g.,
"{'role1': {'can_read'}, 'role2': {'can_read', 'can_edit', 'can_delete'}}"
:param is_paused_upon_creation: Specifies if the dag is paused when created for the first time.
If the dag exists already, this flag will be ignored. If this optional parameter
is not specified, the global config setting will be used.
:param jinja_environment_kwargs: additional configuration options to be passed to Jinja
``Environment`` for template rendering
**Example**: to avoid Jinja from removing a trailing newline from template strings ::
DAG(dag_id='my-dag',
jinja_environment_kwargs={
'keep_trailing_newline': True,
# some other jinja2 Environment options here
}
)
**See**: `Jinja Environment documentation
<https://jinja.palletsprojects.com/en/2.11.x/api/#jinja2.Environment>`_
:param render_template_as_native_obj: If True, uses a Jinja ``NativeEnvironment``
to render templates as native Python types. If False, a Jinja
``Environment`` is used to render templates as string values.
:param tags: List of tags to help filtering DAGs in the UI.
:param owner_links: Dict of owners and their links, that will be clickable on the DAGs view UI.
Can be used as an HTTP link (for example the link to your Slack channel), or a mailto link.
e.g: {"dag_owner": "https://airflow.apache.org/"}
:param auto_register: Automatically register this DAG when it is used in a ``with`` block
    :param fail_stop: Fails currently running tasks when a task in the DAG fails.
        **Warning**: A fail-stop dag can only have tasks with the default trigger rule ("all_success").
        An exception will be thrown if any task in a fail-stop dag has a non-default trigger rule.
"""
_comps = {
"dag_id",
"task_ids",
"parent_dag",
"start_date",
"end_date",
"schedule_interval",
"fileloc",
"template_searchpath",
"last_loaded",
}
__serialized_fields: frozenset[str] | None = None
fileloc: str
"""
File path that needs to be imported to load this DAG or subdag.
This may not be an actual file on disk in the case when this DAG is loaded
from a ZIP file or other DAG distribution format.
"""
parent_dag: DAG | None = None # Gets set when DAGs are loaded
# NOTE: When updating arguments here, please also keep arguments in @dag()
# below in sync. (Search for 'def dag(' in this file.)
def __init__(
self,
dag_id: str,
description: str | None = None,
schedule: ScheduleArg = NOTSET,
schedule_interval: ScheduleIntervalArg = NOTSET,
timetable: Timetable | None = None,
start_date: datetime | None = None,
end_date: datetime | None = None,
full_filepath: str | None = None,
template_searchpath: str | Iterable[str] | None = None,
template_undefined: type[jinja2.StrictUndefined] = jinja2.StrictUndefined,
user_defined_macros: dict | None = None,
user_defined_filters: dict | None = None,
default_args: dict | None = None,
concurrency: int | None = None,
max_active_tasks: int = airflow_conf.getint("core", "max_active_tasks_per_dag"),
max_active_runs: int = airflow_conf.getint("core", "max_active_runs_per_dag"),
dagrun_timeout: timedelta | None = None,
sla_miss_callback: None | SLAMissCallback | list[SLAMissCallback] = None,
default_view: str = airflow_conf.get_mandatory_value("webserver", "dag_default_view").lower(),
orientation: str = airflow_conf.get_mandatory_value("webserver", "dag_orientation"),
catchup: bool = airflow_conf.getboolean("scheduler", "catchup_by_default"),
on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
doc_md: str | None = None,
params: collections.abc.MutableMapping | None = None,
access_control: dict | None = None,
is_paused_upon_creation: bool | None = None,
jinja_environment_kwargs: dict | None = None,
render_template_as_native_obj: bool = False,
tags: list[str] | None = None,
owner_links: dict[str, str] | None = None,
auto_register: bool = True,
fail_stop: bool = False,
):
from airflow.utils.task_group import TaskGroup
if tags and any(len(tag) > TAG_MAX_LEN for tag in tags):
raise AirflowException(f"tag cannot be longer than {TAG_MAX_LEN} characters")
self.owner_links = owner_links if owner_links else {}
self.user_defined_macros = user_defined_macros
self.user_defined_filters = user_defined_filters
if default_args and not isinstance(default_args, dict):
raise TypeError("default_args must be a dict")
self.default_args = copy.deepcopy(default_args or {})
params = params or {}
# merging potentially conflicting default_args['params'] into params
if "params" in self.default_args:
params.update(self.default_args["params"])
del self.default_args["params"]
# check self.params and convert them into ParamsDict
self.params = ParamsDict(params)
if full_filepath:
warnings.warn(
"Passing full_filepath to DAG() is deprecated and has no effect",
RemovedInAirflow3Warning,
stacklevel=2,
)
validate_key(dag_id)
self._dag_id = dag_id
if concurrency:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The 'concurrency' parameter is deprecated. Please use 'max_active_tasks'.",
RemovedInAirflow3Warning,
stacklevel=2,
)
max_active_tasks = concurrency
self._max_active_tasks = max_active_tasks
self._pickle_id: int | None = None
self._description = description
# set file location to caller source path
back = sys._getframe().f_back
self.fileloc = back.f_code.co_filename if back else ""
self.task_dict: dict[str, Operator] = {}
# set timezone from start_date
tz = None
if start_date and start_date.tzinfo:
tzinfo = None if start_date.tzinfo else settings.TIMEZONE
tz = pendulum.instance(start_date, tz=tzinfo).timezone
elif "start_date" in self.default_args and self.default_args["start_date"]:
date = self.default_args["start_date"]
if not isinstance(date, datetime):
date = timezone.parse(date)
self.default_args["start_date"] = date
start_date = date
tzinfo = None if date.tzinfo else settings.TIMEZONE
tz = pendulum.instance(date, tz=tzinfo).timezone
self.timezone = tz or settings.TIMEZONE
# Apply the timezone we settled on to end_date if it wasn't supplied
if "end_date" in self.default_args and self.default_args["end_date"]:
if isinstance(self.default_args["end_date"], str):
self.default_args["end_date"] = timezone.parse(
self.default_args["end_date"], timezone=self.timezone
)
self.start_date = timezone.convert_to_utc(start_date)
self.end_date = timezone.convert_to_utc(end_date)
# also convert tasks
if "start_date" in self.default_args:
self.default_args["start_date"] = timezone.convert_to_utc(self.default_args["start_date"])
if "end_date" in self.default_args:
self.default_args["end_date"] = timezone.convert_to_utc(self.default_args["end_date"])
# sort out DAG's scheduling behavior
scheduling_args = [schedule_interval, timetable, schedule]
if not at_most_one(*scheduling_args):
raise ValueError("At most one allowed for args 'schedule_interval', 'timetable', and 'schedule'.")
if schedule_interval is not NOTSET:
warnings.warn(
"Param `schedule_interval` is deprecated and will be removed in a future release. "
"Please use `schedule` instead. ",
RemovedInAirflow3Warning,
stacklevel=2,
)
if timetable is not None:
warnings.warn(
"Param `timetable` is deprecated and will be removed in a future release. "
"Please use `schedule` instead. ",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.timetable: Timetable
self.schedule_interval: ScheduleInterval
self.dataset_triggers: Collection[Dataset] = []
if isinstance(schedule, Collection) and not isinstance(schedule, str):
from airflow.datasets import Dataset
if not all(isinstance(x, Dataset) for x in schedule):
raise ValueError("All elements in 'schedule' should be datasets")
self.dataset_triggers = list(schedule)
elif isinstance(schedule, Timetable):
timetable = schedule
elif schedule is not NOTSET:
schedule_interval = schedule
if self.dataset_triggers:
self.timetable = DatasetTriggeredTimetable()
self.schedule_interval = self.timetable.summary
elif timetable:
self.timetable = timetable
self.schedule_interval = self.timetable.summary
else:
if isinstance(schedule_interval, ArgNotSet):
schedule_interval = DEFAULT_SCHEDULE_INTERVAL
self.schedule_interval = schedule_interval
self.timetable = create_timetable(schedule_interval, self.timezone)
if isinstance(template_searchpath, str):
template_searchpath = [template_searchpath]
self.template_searchpath = template_searchpath
self.template_undefined = template_undefined
self.last_loaded = timezone.utcnow()
self.safe_dag_id = dag_id.replace(".", "__dot__")
self.max_active_runs = max_active_runs
if self.timetable.active_runs_limit is not None:
if self.timetable.active_runs_limit < self.max_active_runs:
raise AirflowException(
f"Invalid max_active_runs: {type(self.timetable)} "
f"requires max_active_runs <= {self.timetable.active_runs_limit}"
)
self.dagrun_timeout = dagrun_timeout
self.sla_miss_callback = sla_miss_callback
if default_view in DEFAULT_VIEW_PRESETS:
self._default_view: str = default_view
elif default_view == "tree":
warnings.warn(
"`default_view` of 'tree' has been renamed to 'grid' -- please update your DAG",
RemovedInAirflow3Warning,
stacklevel=2,
)
self._default_view = "grid"
else:
raise AirflowException(
f"Invalid values of dag.default_view: only support "
f"{DEFAULT_VIEW_PRESETS}, but get {default_view}"
)
if orientation in ORIENTATION_PRESETS:
self.orientation = orientation
else:
raise AirflowException(
f"Invalid values of dag.orientation: only support "
f"{ORIENTATION_PRESETS}, but get {orientation}"
)
self.catchup = catchup
self.partial = False
self.on_success_callback = on_success_callback
self.on_failure_callback = on_failure_callback
# Keeps track of any extra edge metadata (sparse; will not contain all
# edges, so do not iterate over it for that). Outer key is upstream
# task ID, inner key is downstream task ID.
self.edge_info: dict[str, dict[str, EdgeInfoType]] = {}
# To keep it in parity with Serialized DAGs
# and identify if DAG has on_*_callback without actually storing them in Serialized JSON
self.has_on_success_callback = self.on_success_callback is not None
self.has_on_failure_callback = self.on_failure_callback is not None
self._access_control = DAG._upgrade_outdated_dag_access_control(access_control)
self.is_paused_upon_creation = is_paused_upon_creation
self.auto_register = auto_register
self.fail_stop = fail_stop
self.jinja_environment_kwargs = jinja_environment_kwargs
self.render_template_as_native_obj = render_template_as_native_obj
self.doc_md = self.get_doc_md(doc_md)
self.tags = tags or []
self._task_group = TaskGroup.create_root(self)
self.validate_schedule_and_params()
wrong_links = dict(self.iter_invalid_owner_links())
if wrong_links:
raise AirflowException(
"Wrong link format was used for the owner. Use a valid link \n"
f"Bad formatted links are: {wrong_links}"
)
# this will only be set at serialization time
# it's only use is for determining the relative
# fileloc based only on the serialize dag
self._processor_dags_folder = None
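    # Illustrative usage sketch (not part of upstream code; dag_id and dates below are
    # made up): the constructor above is what runs when a DAG is declared, typically as
    # a context manager:
    #
    #     with DAG(
    #         dag_id="example_dag",
    #         schedule="@daily",
    #         start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
    #         catchup=False,
    #     ) as dag:
    #         ...  # operators defined in this block are auto-registered on `dag`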
def get_doc_md(self, doc_md: str | None) -> str | None:
if doc_md is None:
return doc_md
env = self.get_template_env(force_sandboxed=True)
if not doc_md.endswith(".md"):
template = jinja2.Template(doc_md)
else:
try:
template = env.get_template(doc_md)
except jinja2.exceptions.TemplateNotFound:
return f"""
# Templating Error!
Not able to find the template file: `{doc_md}`.
"""
return template.render()
def _check_schedule_interval_matches_timetable(self) -> bool:
"""Check ``schedule_interval`` and ``timetable`` match.
This is done as a part of the DAG validation done before it's bagged, to
        guard against the DAG's ``timetable`` (or ``schedule_interval``)
        being changed after it's created, e.g.
.. code-block:: python
dag1 = DAG("d1", timetable=MyTimetable())
dag1.schedule_interval = "@once"
dag2 = DAG("d2", schedule="@once")
dag2.timetable = MyTimetable()
        Validation is done by creating a timetable and checking whether its summary matches
``schedule_interval``. The logic is not bullet-proof, especially if a
custom timetable does not provide a useful ``summary``. But this is the
best we can do.
"""
if self.schedule_interval == self.timetable.summary:
return True
try:
timetable = create_timetable(self.schedule_interval, self.timezone)
except ValueError:
return False
return timetable.summary == self.timetable.summary
def validate(self):
"""Validate the DAG has a coherent setup.
This is called by the DAG bag before bagging the DAG.
"""
if not self._check_schedule_interval_matches_timetable():
raise AirflowDagInconsistent(
f"inconsistent schedule: timetable {self.timetable.summary!r} "
f"does not match schedule_interval {self.schedule_interval!r}",
)
self.params.validate()
self.timetable.validate()
self.validate_setup_teardown()
def validate_setup_teardown(self):
"""
Validate that setup and teardown tasks are configured properly.
:meta private:
"""
for task in self.tasks:
if task.is_setup and not any(x.is_teardown for x in task.downstream_list):
raise AirflowDagInconsistent(
f"Dag has setup without teardown: dag='{self.dag_id}', task='{task.task_id}'"
)
if task.is_teardown and all(x.is_setup for x in task.upstream_list):
raise AirflowDagInconsistent(
f"Dag has teardown task without an upstream work task: dag='{self.dag_id}',"
f" task='{task.task_id}'"
)
def __repr__(self):
return f"<DAG: {self.dag_id}>"
def __eq__(self, other):
if type(self) == type(other):
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
return False
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.dag_id < other.dag_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
# task_ids returns a list and lists can't be hashed
if c == "task_ids":
val = tuple(self.task_dict.keys())
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Context Manager -----------------------------------------------
def __enter__(self):
DagContext.push_context_managed_dag(self)
return self
def __exit__(self, _type, _value, _tb):
DagContext.pop_context_managed_dag()
# /Context Manager ----------------------------------------------
@staticmethod
def _upgrade_outdated_dag_access_control(access_control=None):
"""
Looks for outdated dag level actions (can_dag_read and can_dag_edit) in DAG
access_controls (for example, {'role1': {'can_dag_read'}, 'role2': {'can_dag_read', 'can_dag_edit'}})
and replaces them with updated actions (can_read and can_edit).
"""
if not access_control:
return None
new_perm_mapping = {
permissions.DEPRECATED_ACTION_CAN_DAG_READ: permissions.ACTION_CAN_READ,
permissions.DEPRECATED_ACTION_CAN_DAG_EDIT: permissions.ACTION_CAN_EDIT,
}
updated_access_control = {}
for role, perms in access_control.items():
updated_access_control[role] = {new_perm_mapping.get(perm, perm) for perm in perms}
if access_control != updated_access_control:
warnings.warn(
"The 'can_dag_read' and 'can_dag_edit' permissions are deprecated. "
"Please use 'can_read' and 'can_edit', respectively.",
RemovedInAirflow3Warning,
stacklevel=3,
)
return updated_access_control
def date_range(
self,
start_date: pendulum.DateTime,
num: int | None = None,
end_date: datetime | None = None,
) -> list[datetime]:
message = "`DAG.date_range()` is deprecated."
if num is not None:
warnings.warn(message, category=RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return utils_date_range(
start_date=start_date, num=num, delta=self.normalized_schedule_interval
)
message += " Please use `DAG.iter_dagrun_infos_between(..., align=False)` instead."
warnings.warn(message, category=RemovedInAirflow3Warning, stacklevel=2)
if end_date is None:
coerced_end_date = timezone.utcnow()
else:
coerced_end_date = end_date
it = self.iter_dagrun_infos_between(start_date, pendulum.instance(coerced_end_date), align=False)
return [info.logical_date for info in it]
def is_fixed_time_schedule(self):
warnings.warn(
"`DAG.is_fixed_time_schedule()` is deprecated.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
try:
return not self.timetable._should_fix_dst
except AttributeError:
return True
def following_schedule(self, dttm):
"""
Calculates the following schedule for this dag in UTC.
:param dttm: utc datetime
:return: utc datetime
"""
warnings.warn(
"`DAG.following_schedule()` is deprecated. Use `DAG.next_dagrun_info(restricted=False)` instead.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
data_interval = self.infer_automated_data_interval(timezone.coerce_datetime(dttm))
next_info = self.next_dagrun_info(data_interval, restricted=False)
if next_info is None:
return None
return next_info.data_interval.start
def previous_schedule(self, dttm):
from airflow.timetables.interval import _DataIntervalTimetable
warnings.warn(
"`DAG.previous_schedule()` is deprecated.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
if not isinstance(self.timetable, _DataIntervalTimetable):
return None
return self.timetable._get_prev(timezone.coerce_datetime(dttm))
def get_next_data_interval(self, dag_model: DagModel) -> DataInterval | None:
"""Get the data interval of the next scheduled run.
For compatibility, this method infers the data interval from the DAG's
schedule if the run does not have an explicit one set, which is possible
for runs created prior to AIP-39.
This function is private to Airflow core and should not be depended on as a
part of the Python API.
:meta private:
"""
if self.dag_id != dag_model.dag_id:
raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {dag_model.dag_id}")
if dag_model.next_dagrun is None: # Next run not scheduled.
return None
data_interval = dag_model.next_dagrun_data_interval
if data_interval is not None:
return data_interval
# Compatibility: A run was scheduled without an explicit data interval.
# This means the run was scheduled before AIP-39 implementation. Try to
# infer from the logical date.
return self.infer_automated_data_interval(dag_model.next_dagrun)
def get_run_data_interval(self, run: DagRun) -> DataInterval:
"""Get the data interval of this run.
For compatibility, this method infers the data interval from the DAG's
schedule if the run does not have an explicit one set, which is possible for
runs created prior to AIP-39.
This function is private to Airflow core and should not be depended on as a
part of the Python API.
:meta private:
"""
if run.dag_id is not None and run.dag_id != self.dag_id:
raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {run.dag_id}")
data_interval = _get_model_data_interval(run, "data_interval_start", "data_interval_end")
if data_interval is not None:
return data_interval
# Compatibility: runs created before AIP-39 implementation don't have an
# explicit data interval. Try to infer from the logical date.
return self.infer_automated_data_interval(run.execution_date)
def infer_automated_data_interval(self, logical_date: datetime) -> DataInterval:
"""Infer a data interval for a run against this DAG.
This method is used to bridge runs created prior to AIP-39
implementation, which do not have an explicit data interval. Therefore,
this method only considers ``schedule_interval`` values valid prior to
Airflow 2.2.
DO NOT call this method if there is a known data interval.
:meta private:
"""
timetable_type = type(self.timetable)
if issubclass(timetable_type, (NullTimetable, OnceTimetable, DatasetTriggeredTimetable)):
return DataInterval.exact(timezone.coerce_datetime(logical_date))
start = timezone.coerce_datetime(logical_date)
if issubclass(timetable_type, CronDataIntervalTimetable):
end = cast(CronDataIntervalTimetable, self.timetable)._get_next(start)
elif issubclass(timetable_type, DeltaDataIntervalTimetable):
end = cast(DeltaDataIntervalTimetable, self.timetable)._get_next(start)
# Contributors: When the exception below is raised, you might want to
# add an 'elif' block here to handle custom timetables. Stop! The bug
# you're looking for is instead at when the DAG run (represented by
# logical_date) was created. See GH-31969 for an example:
# * Wrong fix: GH-32074 (modifies this function).
# * Correct fix: GH-32118 (modifies the DAG run creation code).
else:
raise ValueError(f"Not a valid timetable: {self.timetable!r}")
return DataInterval(start, end)
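    # Worked example (assumes a daily cron timetable; dates are illustrative): for
    # logical_date 2021-06-03T00:00:00+00:00 this returns
    # DataInterval(start=2021-06-03T00:00:00+00:00, end=2021-06-04T00:00:00+00:00),
    # i.e. start == logical_date and end == the following cron tick. @once, "None",
    # and dataset-triggered timetables instead get a zero-length "exact" interval.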
def next_dagrun_info(
self,
last_automated_dagrun: None | datetime | DataInterval,
*,
restricted: bool = True,
) -> DagRunInfo | None:
"""Get information about the next DagRun of this dag after ``date_last_automated_dagrun``.
This calculates what time interval the next DagRun should operate on
(its execution date) and when it can be scheduled, according to the
dag's timetable, start_date, end_date, etc. This doesn't check max
active run or any other "max_active_tasks" type limits, but only
performs calculations based on the various date and interval fields of
this dag and its tasks.
:param last_automated_dagrun: The ``max(execution_date)`` of
existing "automated" DagRuns for this dag (scheduled or backfill,
but not manual).
:param restricted: If set to *False* (default is *True*), ignore
``start_date``, ``end_date``, and ``catchup`` specified on the DAG
or tasks.
:return: DagRunInfo of the next dagrun, or None if a dagrun is not
going to be scheduled.
"""
# Never schedule a subdag. It will be scheduled by its parent dag.
if self.is_subdag:
return None
data_interval = None
if isinstance(last_automated_dagrun, datetime):
warnings.warn(
"Passing a datetime to DAG.next_dagrun_info is deprecated. Use a DataInterval instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
data_interval = self.infer_automated_data_interval(
timezone.coerce_datetime(last_automated_dagrun)
)
else:
data_interval = last_automated_dagrun
if restricted:
restriction = self._time_restriction
else:
restriction = TimeRestriction(earliest=None, latest=None, catchup=True)
try:
info = self.timetable.next_dagrun_info(
last_automated_data_interval=data_interval,
restriction=restriction,
)
except Exception:
self.log.exception(
"Failed to fetch run info after data interval %s for DAG %r",
data_interval,
self.dag_id,
)
info = None
return info
def next_dagrun_after_date(self, date_last_automated_dagrun: pendulum.DateTime | None):
warnings.warn(
"`DAG.next_dagrun_after_date()` is deprecated. Please use `DAG.next_dagrun_info()` instead.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
if date_last_automated_dagrun is None:
data_interval = None
else:
data_interval = self.infer_automated_data_interval(date_last_automated_dagrun)
info = self.next_dagrun_info(data_interval)
if info is None:
return None
return info.run_after
@functools.cached_property
def _time_restriction(self) -> TimeRestriction:
start_dates = [t.start_date for t in self.tasks if t.start_date]
if self.start_date is not None:
start_dates.append(self.start_date)
earliest = None
if start_dates:
earliest = timezone.coerce_datetime(min(start_dates))
latest = self.end_date
end_dates = [t.end_date for t in self.tasks if t.end_date]
if len(end_dates) == len(self.tasks): # not exists null end_date
if self.end_date is not None:
end_dates.append(self.end_date)
if end_dates:
latest = timezone.coerce_datetime(max(end_dates))
return TimeRestriction(earliest, latest, self.catchup)
def iter_dagrun_infos_between(
self,
earliest: pendulum.DateTime | None,
latest: pendulum.DateTime,
*,
align: bool = True,
) -> Iterable[DagRunInfo]:
"""Yield DagRunInfo using this DAG's timetable between given interval.
        DagRunInfo instances are yielded if their ``logical_date`` is not earlier
than ``earliest``, nor later than ``latest``. The instances are ordered
by their ``logical_date`` from earliest to latest.
If ``align`` is ``False``, the first run will happen immediately on
``earliest``, even if it does not fall on the logical timetable schedule.
The default is ``True``, but subdags will ignore this value and always
behave as if this is set to ``False`` for backward compatibility.
Example: A DAG is scheduled to run every midnight (``0 0 * * *``). If
``earliest`` is ``2021-06-03 23:00:00``, the first DagRunInfo would be
``2021-06-03 23:00:00`` if ``align=False``, and ``2021-06-04 00:00:00``
if ``align=True``.
"""
if earliest is None:
earliest = self._time_restriction.earliest
if earliest is None:
raise ValueError("earliest was None and we had no value in time_restriction to fallback on")
earliest = timezone.coerce_datetime(earliest)
latest = timezone.coerce_datetime(latest)
restriction = TimeRestriction(earliest, latest, catchup=True)
# HACK: Sub-DAGs are currently scheduled differently. For example, say
# the schedule is @daily and start is 2021-06-03 22:16:00, a top-level
# DAG should be first scheduled to run on midnight 2021-06-04, but a
# sub-DAG should be first scheduled to run RIGHT NOW. We can change
# this, but since sub-DAGs are going away in 3.0 anyway, let's keep
# compatibility for now and remove this entirely later.
if self.is_subdag:
align = False
try:
info = self.timetable.next_dagrun_info(
last_automated_data_interval=None,
restriction=restriction,
)
except Exception:
self.log.exception(
"Failed to fetch run info after data interval %s for DAG %r",
None,
self.dag_id,
)
info = None
if info is None:
# No runs to be scheduled between the user-supplied timeframe. But
# if align=False, "invent" a data interval for the timeframe itself.
if not align:
yield DagRunInfo.interval(earliest, latest)
return
# If align=False and earliest does not fall on the timetable's logical
# schedule, "invent" a data interval for it.
if not align and info.logical_date != earliest:
yield DagRunInfo.interval(earliest, info.data_interval.start)
# Generate naturally according to schedule.
while info is not None:
yield info
try:
info = self.timetable.next_dagrun_info(
last_automated_data_interval=info.data_interval,
restriction=restriction,
)
except Exception:
self.log.exception(
"Failed to fetch run info after data interval %s for DAG %r",
info.data_interval if info else "<NONE>",
self.dag_id,
)
break
def get_run_dates(self, start_date, end_date=None) -> list:
"""
        Returns a list of dates within the interval received as parameters, based on this
        dag's schedule interval. The returned dates can be used as execution dates.
:param start_date: The start date of the interval.
:param end_date: The end date of the interval. Defaults to ``timezone.utcnow()``.
:return: A list of dates within the interval following the dag's schedule.
"""
warnings.warn(
"`DAG.get_run_dates()` is deprecated. Please use `DAG.iter_dagrun_infos_between()` instead.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
earliest = timezone.coerce_datetime(start_date)
if end_date is None:
latest = pendulum.now(timezone.utc)
else:
latest = timezone.coerce_datetime(end_date)
return [info.logical_date for info in self.iter_dagrun_infos_between(earliest, latest)]
def normalize_schedule(self, dttm):
warnings.warn(
"`DAG.normalize_schedule()` is deprecated.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
following = self.following_schedule(dttm)
if not following: # in case of @once
return dttm
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
previous_of_following = self.previous_schedule(following)
if previous_of_following != dttm:
return following
return dttm
@provide_session
def get_last_dagrun(self, session=NEW_SESSION, include_externally_triggered=False):
return get_last_dagrun(
self.dag_id, session=session, include_externally_triggered=include_externally_triggered
)
@provide_session
def has_dag_runs(self, session=NEW_SESSION, include_externally_triggered=True) -> bool:
return (
get_last_dagrun(
self.dag_id, session=session, include_externally_triggered=include_externally_triggered
)
is not None
)
@property
def dag_id(self) -> str:
return self._dag_id
@dag_id.setter
def dag_id(self, value: str) -> None:
self._dag_id = value
@property
def is_subdag(self) -> bool:
return self.parent_dag is not None
@property
def full_filepath(self) -> str:
"""Full file path to the DAG.
:meta private:
"""
warnings.warn(
"DAG.full_filepath is deprecated in favour of fileloc",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.fileloc
@full_filepath.setter
def full_filepath(self, value) -> None:
warnings.warn(
"DAG.full_filepath is deprecated in favour of fileloc",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.fileloc = value
@property
def concurrency(self) -> int:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The 'DAG.concurrency' attribute is deprecated. Please use 'DAG.max_active_tasks'.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self._max_active_tasks
@concurrency.setter
def concurrency(self, value: int):
self._max_active_tasks = value
@property
def max_active_tasks(self) -> int:
return self._max_active_tasks
@max_active_tasks.setter
def max_active_tasks(self, value: int):
self._max_active_tasks = value
@property
def access_control(self):
return self._access_control
@access_control.setter
def access_control(self, value):
self._access_control = DAG._upgrade_outdated_dag_access_control(value)
@property
def description(self) -> str | None:
return self._description
@property
def default_view(self) -> str:
return self._default_view
@property
def pickle_id(self) -> int | None:
return self._pickle_id
@pickle_id.setter
def pickle_id(self, value: int) -> None:
self._pickle_id = value
def param(self, name: str, default: Any = NOTSET) -> DagParam:
"""
Return a DagParam object for current dag.
:param name: dag parameter name.
:param default: fallback value for dag parameter.
:return: DagParam instance for specified name and current dag.
"""
return DagParam(current_dag=self, name=name, default=default)
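    # Illustrative sketch (task and param names are hypothetical): a DagParam resolves at
    # run time from ``params`` / ``dag_run.conf``, typically by passing it into a TaskFlow
    # task:
    #
    #     @task
    #     def show(value):
    #         print(value)
    #
    #     show(dag.param("my_param", default=42))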
@property
def tasks(self) -> list[Operator]:
return list(self.task_dict.values())
@tasks.setter
def tasks(self, val):
raise AttributeError("DAG.tasks can not be modified. Use dag.add_task() instead.")
@property
def task_ids(self) -> list[str]:
return list(self.task_dict.keys())
@property
def teardowns(self) -> list[Operator]:
return [task for task in self.tasks if getattr(task, "is_teardown", None)]
@property
def tasks_upstream_of_teardowns(self) -> list[Operator]:
upstream_tasks = [t.upstream_list for t in self.teardowns]
return [val for sublist in upstream_tasks for val in sublist if not getattr(val, "is_teardown", None)]
@property
def task_group(self) -> TaskGroup:
return self._task_group
@property
def filepath(self) -> str:
"""Relative file path to the DAG.
:meta private:
"""
warnings.warn(
"filepath is deprecated, use relative_fileloc instead",
RemovedInAirflow3Warning,
stacklevel=2,
)
return str(self.relative_fileloc)
@property
def relative_fileloc(self) -> pathlib.Path:
"""File location of the importable dag 'file' relative to the configured DAGs folder."""
path = pathlib.Path(self.fileloc)
try:
rel_path = path.relative_to(self._processor_dags_folder or settings.DAGS_FOLDER)
if rel_path == pathlib.Path("."):
return path
else:
return rel_path
except ValueError:
# Not relative to DAGS_FOLDER.
return path
@property
def folder(self) -> str:
"""Folder location of where the DAG object is instantiated."""
return os.path.dirname(self.fileloc)
@property
def owner(self) -> str:
"""
Return list of all owners found in DAG tasks.
:return: Comma separated list of owners in DAG tasks
"""
return ", ".join({t.owner for t in self.tasks})
@property
def allow_future_exec_dates(self) -> bool:
return settings.ALLOW_FUTURE_EXEC_DATES and not self.timetable.can_be_scheduled
@provide_session
def get_concurrency_reached(self, session=NEW_SESSION) -> bool:
"""
Returns a boolean indicating whether the max_active_tasks limit for this DAG
has been reached.
"""
TI = TaskInstance
total_tasks = session.scalar(
select(func.count(TI.task_id)).where(
TI.dag_id == self.dag_id,
TI.state == TaskInstanceState.RUNNING,
)
)
return total_tasks >= self.max_active_tasks
@property
def concurrency_reached(self):
"""This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method."""
warnings.warn(
"This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_concurrency_reached()
@provide_session
    def get_is_active(self, session=NEW_SESSION) -> bool:
"""Returns a boolean indicating whether this DAG is active."""
return session.scalar(select(DagModel.is_active).where(DagModel.dag_id == self.dag_id))
@provide_session
    def get_is_paused(self, session=NEW_SESSION) -> bool:
"""Returns a boolean indicating whether this DAG is paused."""
return session.scalar(select(DagModel.is_paused).where(DagModel.dag_id == self.dag_id))
@property
def is_paused(self):
"""This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method."""
warnings.warn(
"This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_is_paused()
@property
def normalized_schedule_interval(self) -> ScheduleInterval:
warnings.warn(
"DAG.normalized_schedule_interval() is deprecated.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
if isinstance(self.schedule_interval, str) and self.schedule_interval in cron_presets:
_schedule_interval: ScheduleInterval = cron_presets.get(self.schedule_interval)
elif self.schedule_interval == "@once":
_schedule_interval = None
else:
_schedule_interval = self.schedule_interval
return _schedule_interval
@provide_session
def handle_callback(self, dagrun, success=True, reason=None, session=NEW_SESSION):
"""
Triggers the appropriate callback depending on the value of success, namely the
on_failure_callback or on_success_callback. This method gets the context of a
        single TaskInstance that is part of this DagRun and passes that to the callable along
with a 'reason', primarily to differentiate DagRun failures.
        .. note:: The logs end up in
``$AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log``
:param dagrun: DagRun object
:param success: Flag to specify if failure or success callback should be called
:param reason: Completion reason
:param session: Database session
"""
callbacks = self.on_success_callback if success else self.on_failure_callback
if callbacks:
callbacks = callbacks if isinstance(callbacks, list) else [callbacks]
tis = dagrun.get_task_instances(session=session)
            ti = tis[-1]  # use the last TaskInstance of the DagRun to build the callback context
ti.task = self.get_task(ti.task_id)
context = ti.get_template_context(session=session)
context.update({"reason": reason})
for callback in callbacks:
self.log.info("Executing dag callback function: %s", callback)
try:
callback(context)
except Exception:
self.log.exception("failed to invoke dag state update callback")
Stats.incr("dag.callback_exceptions", tags={"dag_id": dagrun.dag_id})
def get_active_runs(self):
"""
Returns a list of dag run execution dates currently running.
:return: List of execution dates
"""
runs = DagRun.find(dag_id=self.dag_id, state=DagRunState.RUNNING)
active_dates = []
for run in runs:
active_dates.append(run.execution_date)
return active_dates
@provide_session
def get_num_active_runs(self, external_trigger=None, only_running=True, session=NEW_SESSION):
"""
Returns the number of active "running" dag runs.
:param external_trigger: True for externally triggered active dag runs
:param session:
        :return: the number of matching dag runs
"""
query = select(func.count()).where(DagRun.dag_id == self.dag_id)
if only_running:
query = query.where(DagRun.state == DagRunState.RUNNING)
else:
query = query.where(DagRun.state.in_({DagRunState.RUNNING, DagRunState.QUEUED}))
if external_trigger is not None:
query = query.where(
DagRun.external_trigger == (expression.true() if external_trigger else expression.false())
)
return session.scalar(query)
@provide_session
def get_dagrun(
self,
execution_date: datetime | None = None,
run_id: str | None = None,
session: Session = NEW_SESSION,
):
"""
Returns the dag run for a given execution date or run_id if it exists, otherwise
        ``None``.
:param execution_date: The execution date of the DagRun to find.
:param run_id: The run_id of the DagRun to find.
:param session:
:return: The DagRun if found, otherwise None.
"""
if not (execution_date or run_id):
raise TypeError("You must provide either the execution_date or the run_id")
query = select(DagRun)
if execution_date:
query = query.where(DagRun.dag_id == self.dag_id, DagRun.execution_date == execution_date)
if run_id:
query = query.where(DagRun.dag_id == self.dag_id, DagRun.run_id == run_id)
return session.scalar(query)
@provide_session
def get_dagruns_between(self, start_date, end_date, session=NEW_SESSION):
"""
Returns the list of dag runs between start_date (inclusive) and end_date (inclusive).
:param start_date: The starting execution date of the DagRun to find.
:param end_date: The ending execution date of the DagRun to find.
:param session:
:return: The list of DagRuns found.
"""
dagruns = session.scalars(
select(DagRun).where(
DagRun.dag_id == self.dag_id,
DagRun.execution_date >= start_date,
DagRun.execution_date <= end_date,
)
).all()
return dagruns
@provide_session
def get_latest_execution_date(self, session: Session = NEW_SESSION) -> pendulum.DateTime | None:
"""Returns the latest date for which at least one dag run exists."""
return session.scalar(select(func.max(DagRun.execution_date)).where(DagRun.dag_id == self.dag_id))
@property
def latest_execution_date(self):
"""This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date`."""
warnings.warn(
"This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.get_latest_execution_date()
@property
def subdags(self):
"""Returns a list of the subdag objects associated to this DAG."""
# Check SubDag for class but don't check class directly
from airflow.operators.subdag import SubDagOperator
subdag_lst = []
for task in self.tasks:
if (
isinstance(task, SubDagOperator)
or
# TODO remove in Airflow 2.0
type(task).__name__ == "SubDagOperator"
or task.task_type == "SubDagOperator"
):
subdag_lst.append(task.subdag)
subdag_lst += task.subdag.subdags
return subdag_lst
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
def get_template_env(self, *, force_sandboxed: bool = False) -> jinja2.Environment:
"""Build a Jinja2 environment."""
# Collect directories to search for template files
searchpath = [self.folder]
if self.template_searchpath:
searchpath += self.template_searchpath
# Default values (for backward compatibility)
jinja_env_options = {
"loader": jinja2.FileSystemLoader(searchpath),
"undefined": self.template_undefined,
"extensions": ["jinja2.ext.do"],
"cache_size": 0,
}
if self.jinja_environment_kwargs:
jinja_env_options.update(self.jinja_environment_kwargs)
env: jinja2.Environment
if self.render_template_as_native_obj and not force_sandboxed:
env = airflow.templates.NativeEnvironment(**jinja_env_options)
else:
env = airflow.templates.SandboxedEnvironment(**jinja_env_options)
# Add any user defined items. Safe to edit globals as long as no templates are rendered yet.
# http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment.globals
if self.user_defined_macros:
env.globals.update(self.user_defined_macros)
if self.user_defined_filters:
env.filters.update(self.user_defined_filters)
return env
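    # Behavioural note (illustrative values): anything registered through
    # ``user_defined_macros`` / ``user_defined_filters`` ends up in this environment, so
    # with user_defined_macros={"project": "demo"} a templated field "{{ project }}"
    # renders to "demo".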
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task().
"""
self.get_task(upstream_task_id).set_downstream(self.get_task(downstream_task_id))
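    # Illustrative sketch (task ids are hypothetical): dag.set_dependency("extract", "load")
    # is equivalent to dag.get_task("extract") >> dag.get_task("load") for tasks that have
    # already been added to this DAG.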
@provide_session
def get_task_instances_before(
self,
base_date: datetime,
num: int,
*,
session: Session = NEW_SESSION,
) -> list[TaskInstance]:
"""Get ``num`` task instances before (including) ``base_date``.
The returned list may contain exactly ``num`` task instances
        corresponding to any DagRunType. It can have fewer if there are
        fewer than ``num`` scheduled DAG runs before ``base_date``.
"""
execution_dates: list[Any] = session.execute(
select(DagRun.execution_date)
.where(
DagRun.dag_id == self.dag_id,
DagRun.execution_date <= base_date,
)
.order_by(DagRun.execution_date.desc())
.limit(num)
).all()
if len(execution_dates) == 0:
return self.get_task_instances(start_date=base_date, end_date=base_date, session=session)
min_date: datetime | None = execution_dates[-1]._mapping.get(
"execution_date"
) # getting the last value from the list
return self.get_task_instances(start_date=min_date, end_date=base_date, session=session)
@provide_session
def get_task_instances(
self,
start_date: datetime | None = None,
end_date: datetime | None = None,
state: list[TaskInstanceState] | None = None,
session: Session = NEW_SESSION,
) -> list[TaskInstance]:
if not start_date:
start_date = (timezone.utcnow() - timedelta(30)).replace(
hour=0, minute=0, second=0, microsecond=0
)
query = self._get_task_instances(
task_ids=None,
start_date=start_date,
end_date=end_date,
run_id=None,
state=state or (),
include_subdags=False,
include_parentdag=False,
include_dependent_dags=False,
exclude_task_ids=(),
session=session,
)
return session.scalars(cast(Select, query).order_by(DagRun.execution_date)).all()
@overload
def _get_task_instances(
self,
*,
task_ids: Collection[str | tuple[str, int]] | None,
start_date: datetime | None,
end_date: datetime | None,
run_id: str | None,
state: TaskInstanceState | Sequence[TaskInstanceState],
include_subdags: bool,
include_parentdag: bool,
include_dependent_dags: bool,
exclude_task_ids: Collection[str | tuple[str, int]] | None,
session: Session,
dag_bag: DagBag | None = ...,
) -> Iterable[TaskInstance]:
... # pragma: no cover
@overload
def _get_task_instances(
self,
*,
task_ids: Collection[str | tuple[str, int]] | None,
as_pk_tuple: Literal[True],
start_date: datetime | None,
end_date: datetime | None,
run_id: str | None,
state: TaskInstanceState | Sequence[TaskInstanceState],
include_subdags: bool,
include_parentdag: bool,
include_dependent_dags: bool,
exclude_task_ids: Collection[str | tuple[str, int]] | None,
session: Session,
dag_bag: DagBag | None = ...,
recursion_depth: int = ...,
max_recursion_depth: int = ...,
visited_external_tis: set[TaskInstanceKey] = ...,
) -> set[TaskInstanceKey]:
... # pragma: no cover
def _get_task_instances(
self,
*,
task_ids: Collection[str | tuple[str, int]] | None,
as_pk_tuple: Literal[True, None] = None,
start_date: datetime | None,
end_date: datetime | None,
run_id: str | None,
state: TaskInstanceState | Sequence[TaskInstanceState],
include_subdags: bool,
include_parentdag: bool,
include_dependent_dags: bool,
exclude_task_ids: Collection[str | tuple[str, int]] | None,
session: Session,
dag_bag: DagBag | None = None,
recursion_depth: int = 0,
max_recursion_depth: int | None = None,
visited_external_tis: set[TaskInstanceKey] | None = None,
) -> Iterable[TaskInstance] | set[TaskInstanceKey]:
TI = TaskInstance
# If we are looking at subdags/dependent dags we want to avoid UNION calls
# in SQL (it doesn't play nice with fields that have no equality operator,
# like JSON types), we instead build our result set separately.
#
# This will be empty if we are only looking at one dag, in which case
# we can return the filtered TI query object directly.
result: set[TaskInstanceKey] = set()
# Do we want full objects, or just the primary columns?
if as_pk_tuple:
tis = select(TI.dag_id, TI.task_id, TI.run_id, TI.map_index)
else:
tis = select(TaskInstance)
tis = tis.join(TaskInstance.dag_run)
if include_subdags:
# Crafting the right filter for dag_id and task_ids combo
conditions = []
for dag in self.subdags + [self]:
conditions.append(
(TaskInstance.dag_id == dag.dag_id) & TaskInstance.task_id.in_(dag.task_ids)
)
tis = tis.where(or_(*conditions))
elif self.partial:
tis = tis.where(TaskInstance.dag_id == self.dag_id, TaskInstance.task_id.in_(self.task_ids))
else:
tis = tis.where(TaskInstance.dag_id == self.dag_id)
if run_id:
tis = tis.where(TaskInstance.run_id == run_id)
if start_date:
tis = tis.where(DagRun.execution_date >= start_date)
if task_ids is not None:
tis = tis.where(TaskInstance.ti_selector_condition(task_ids))
        # This allows the allow_trigger_in_future config to take effect, rather than mandating exec_date <= UTC
if end_date or not self.allow_future_exec_dates:
end_date = end_date or timezone.utcnow()
tis = tis.where(DagRun.execution_date <= end_date)
if state:
if isinstance(state, (str, TaskInstanceState)):
tis = tis.where(TaskInstance.state == state)
elif len(state) == 1:
tis = tis.where(TaskInstance.state == state[0])
else:
# this is required to deal with NULL values
if None in state:
if all(x is None for x in state):
tis = tis.where(TaskInstance.state.is_(None))
else:
not_none_state = [s for s in state if s]
tis = tis.where(
or_(TaskInstance.state.in_(not_none_state), TaskInstance.state.is_(None))
)
else:
tis = tis.where(TaskInstance.state.in_(state))
# Next, get any of them from our parent DAG (if there is one)
if include_parentdag and self.parent_dag is not None:
if visited_external_tis is None:
visited_external_tis = set()
p_dag = self.parent_dag.partial_subset(
task_ids_or_regex=r"^{}$".format(self.dag_id.split(".")[1]),
include_upstream=False,
include_downstream=True,
)
result.update(
p_dag._get_task_instances(
task_ids=task_ids,
start_date=start_date,
end_date=end_date,
run_id=None,
state=state,
include_subdags=include_subdags,
include_parentdag=False,
include_dependent_dags=include_dependent_dags,
as_pk_tuple=True,
exclude_task_ids=exclude_task_ids,
session=session,
dag_bag=dag_bag,
recursion_depth=recursion_depth,
max_recursion_depth=max_recursion_depth,
visited_external_tis=visited_external_tis,
)
)
if include_dependent_dags:
# Recursively find external tasks indicated by ExternalTaskMarker
from airflow.sensors.external_task import ExternalTaskMarker
query = tis
if as_pk_tuple:
all_tis = session.execute(query).all()
condition = TI.filter_for_tis(TaskInstanceKey(*cols) for cols in all_tis)
if condition is not None:
query = select(TI).where(condition)
if visited_external_tis is None:
visited_external_tis = set()
external_tasks = session.scalars(query.where(TI.operator == ExternalTaskMarker.__name__))
for ti in external_tasks:
ti_key = ti.key.primary
if ti_key in visited_external_tis:
continue
visited_external_tis.add(ti_key)
task: ExternalTaskMarker = cast(ExternalTaskMarker, copy.copy(self.get_task(ti.task_id)))
ti.task = task
if max_recursion_depth is None:
# Maximum recursion depth allowed is the recursion_depth of the first
# ExternalTaskMarker in the tasks to be visited.
max_recursion_depth = task.recursion_depth
if recursion_depth + 1 > max_recursion_depth:
# Prevent cycles or accidents.
raise AirflowException(
f"Maximum recursion depth {max_recursion_depth} reached for "
f"{ExternalTaskMarker.__name__} {ti.task_id}. "
f"Attempted to clear too many tasks or there may be a cyclic dependency."
)
ti.render_templates()
external_tis = session.scalars(
select(TI)
.join(TI.dag_run)
.where(
TI.dag_id == task.external_dag_id,
TI.task_id == task.external_task_id,
DagRun.execution_date == pendulum.parse(task.execution_date),
)
)
for tii in external_tis:
if not dag_bag:
from airflow.models.dagbag import DagBag
dag_bag = DagBag(read_dags_from_db=True)
external_dag = dag_bag.get_dag(tii.dag_id, session=session)
if not external_dag:
raise AirflowException(f"Could not find dag {tii.dag_id}")
downstream = external_dag.partial_subset(
task_ids_or_regex=[tii.task_id],
include_upstream=False,
include_downstream=True,
)
result.update(
downstream._get_task_instances(
task_ids=None,
run_id=tii.run_id,
start_date=None,
end_date=None,
state=state,
include_subdags=include_subdags,
include_dependent_dags=include_dependent_dags,
include_parentdag=False,
as_pk_tuple=True,
exclude_task_ids=exclude_task_ids,
dag_bag=dag_bag,
session=session,
recursion_depth=recursion_depth + 1,
max_recursion_depth=max_recursion_depth,
visited_external_tis=visited_external_tis,
)
)
if result or as_pk_tuple:
# Only execute the `ti` query if we have also collected some other results (i.e. subdags etc.)
if as_pk_tuple:
tis_query = session.execute(tis).all()
result.update(TaskInstanceKey(**cols._mapping) for cols in tis_query)
else:
result.update(ti.key for ti in session.scalars(tis))
if exclude_task_ids is not None:
result = {
task
for task in result
if task.task_id not in exclude_task_ids
and (task.task_id, task.map_index) not in exclude_task_ids
}
if as_pk_tuple:
return result
if result:
# We've been asked for objects, lets combine it all back in to a result set
ti_filters = TI.filter_for_tis(result)
if ti_filters is not None:
tis = select(TI).where(ti_filters)
elif exclude_task_ids is None:
pass # Disable filter if not set.
elif isinstance(next(iter(exclude_task_ids), None), str):
tis = tis.where(TI.task_id.notin_(exclude_task_ids))
else:
tis = tis.where(not_(tuple_in_condition((TI.task_id, TI.map_index), exclude_task_ids)))
return tis
@provide_session
def set_task_instance_state(
self,
*,
task_id: str,
map_indexes: Collection[int] | None = None,
execution_date: datetime | None = None,
run_id: str | None = None,
state: TaskInstanceState,
upstream: bool = False,
downstream: bool = False,
future: bool = False,
past: bool = False,
commit: bool = True,
session=NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the state of a TaskInstance to the given state, and clear its downstream tasks that are
in failed or upstream_failed state.
:param task_id: Task ID of the TaskInstance
:param map_indexes: Only set TaskInstance if its map_index matches.
If None (default), all mapped TaskInstances of the task are set.
:param execution_date: Execution date of the TaskInstance
:param run_id: The run_id of the TaskInstance
:param state: State to set the TaskInstance to
:param upstream: Include all upstream tasks of the given task_id
:param downstream: Include all downstream tasks of the given task_id
:param future: Include all future TaskInstances of the given task_id
:param commit: Commit changes
:param past: Include all past TaskInstances of the given task_id
"""
from airflow.api.common.mark_tasks import set_state
if not exactly_one(execution_date, run_id):
raise ValueError("Exactly one of execution_date or run_id must be provided")
task = self.get_task(task_id)
task.dag = self
tasks_to_set_state: list[Operator | tuple[Operator, int]]
if map_indexes is None:
tasks_to_set_state = [task]
else:
tasks_to_set_state = [(task, map_index) for map_index in map_indexes]
altered = set_state(
tasks=tasks_to_set_state,
execution_date=execution_date,
run_id=run_id,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=commit,
session=session,
)
if not commit:
return altered
# Clear downstream tasks that are in failed/upstream_failed state to resume them.
# Flush the session so that the tasks marked success are reflected in the db.
session.flush()
subdag = self.partial_subset(
task_ids_or_regex={task_id},
include_downstream=True,
include_upstream=False,
)
if execution_date is None:
dag_run = session.scalars(
select(DagRun).where(DagRun.run_id == run_id, DagRun.dag_id == self.dag_id)
).one() # Raises an error if not found
resolve_execution_date = dag_run.execution_date
else:
resolve_execution_date = execution_date
end_date = resolve_execution_date if not future else None
start_date = resolve_execution_date if not past else None
subdag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=True,
include_parentdag=True,
only_failed=True,
session=session,
# Exclude the task itself from being cleared
exclude_task_ids=frozenset({task_id}),
)
return altered
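    # Illustrative sketch (task_id and run_id are made up): mark one task instance of a run
    # as successful and let the method clear its failed/upstream_failed downstream tasks so
    # they can re-run:
    #
    #     dag.set_task_instance_state(
    #         task_id="transform",
    #         run_id="manual__2021-01-01T00:00:00+00:00",
    #         state=TaskInstanceState.SUCCESS,
    #     )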
@provide_session
def set_task_group_state(
self,
*,
group_id: str,
execution_date: datetime | None = None,
run_id: str | None = None,
state: TaskInstanceState,
upstream: bool = False,
downstream: bool = False,
future: bool = False,
past: bool = False,
commit: bool = True,
session: Session = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the state of the TaskGroup to the given state, and clear its downstream tasks that are
in failed or upstream_failed state.
:param group_id: The group_id of the TaskGroup
:param execution_date: Execution date of the TaskInstance
:param run_id: The run_id of the TaskInstance
:param state: State to set the TaskInstance to
:param upstream: Include all upstream tasks of the given task_id
:param downstream: Include all downstream tasks of the given task_id
:param future: Include all future TaskInstances of the given task_id
:param commit: Commit changes
:param past: Include all past TaskInstances of the given task_id
:param session: new session
"""
from airflow.api.common.mark_tasks import set_state
if not exactly_one(execution_date, run_id):
raise ValueError("Exactly one of execution_date or run_id must be provided")
tasks_to_set_state: list[BaseOperator | tuple[BaseOperator, int]] = []
task_ids: list[str] = []
if execution_date is None:
dag_run = session.scalars(
select(DagRun).where(DagRun.run_id == run_id, DagRun.dag_id == self.dag_id)
).one() # Raises an error if not found
resolve_execution_date = dag_run.execution_date
else:
resolve_execution_date = execution_date
end_date = resolve_execution_date if not future else None
start_date = resolve_execution_date if not past else None
task_group_dict = self.task_group.get_task_group_dict()
task_group = task_group_dict.get(group_id)
if task_group is None:
raise ValueError("TaskGroup {group_id} could not be found")
tasks_to_set_state = [task for task in task_group.iter_tasks() if isinstance(task, BaseOperator)]
task_ids = [task.task_id for task in task_group.iter_tasks()]
dag_runs_query = session.query(DagRun.id).where(DagRun.dag_id == self.dag_id)
if start_date is None and end_date is None:
dag_runs_query = dag_runs_query.where(DagRun.execution_date == start_date)
else:
if start_date is not None:
dag_runs_query = dag_runs_query.where(DagRun.execution_date >= start_date)
if end_date is not None:
dag_runs_query = dag_runs_query.where(DagRun.execution_date <= end_date)
with lock_rows(dag_runs_query, session):
altered = set_state(
tasks=tasks_to_set_state,
execution_date=execution_date,
run_id=run_id,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=commit,
session=session,
)
if not commit:
return altered
# Clear downstream tasks that are in failed/upstream_failed state to resume them.
# Flush the session so that the tasks marked success are reflected in the db.
session.flush()
task_subset = self.partial_subset(
task_ids_or_regex=task_ids,
include_downstream=True,
include_upstream=False,
)
task_subset.clear(
start_date=start_date,
end_date=end_date,
include_subdags=True,
include_parentdag=True,
only_failed=True,
session=session,
# Exclude the task from the current group from being cleared
exclude_task_ids=frozenset(task_ids),
)
return altered
@property
def roots(self) -> list[Operator]:
"""Return nodes with no parents. These are first to execute and are called roots or root nodes."""
return [task for task in self.tasks if not task.upstream_list]
@property
def leaves(self) -> list[Operator]:
"""Return nodes with no children. These are last to execute and are called leaves or leaf nodes."""
return [task for task in self.tasks if not task.downstream_list]
def topological_sort(self, include_subdag_tasks: bool = False):
"""
        Sorts tasks in topological order, such that a task comes after any of its
upstream dependencies.
Deprecated in place of ``task_group.topological_sort``
"""
from airflow.utils.task_group import TaskGroup
def nested_topo(group):
for node in group.topological_sort(_include_subdag_tasks=include_subdag_tasks):
if isinstance(node, TaskGroup):
yield from nested_topo(node)
else:
yield node
return tuple(nested_topo(self.task_group))
@provide_session
def set_dag_runs_state(
self,
state: DagRunState = DagRunState.RUNNING,
session: Session = NEW_SESSION,
start_date: datetime | None = None,
end_date: datetime | None = None,
dag_ids: list[str] = [],
) -> None:
warnings.warn(
"This method is deprecated and will be removed in a future version.",
RemovedInAirflow3Warning,
stacklevel=3,
)
dag_ids = dag_ids or [self.dag_id]
query = update(DagRun).where(DagRun.dag_id.in_(dag_ids))
if start_date:
query = query.where(DagRun.execution_date >= start_date)
if end_date:
query = query.where(DagRun.execution_date <= end_date)
session.execute(query.values(state=state).execution_options(synchronize_session="fetch"))
@provide_session
def clear(
self,
task_ids: Collection[str | tuple[str, int]] | None = None,
start_date: datetime | None = None,
end_date: datetime | None = None,
only_failed: bool = False,
only_running: bool = False,
confirm_prompt: bool = False,
include_subdags: bool = True,
include_parentdag: bool = True,
dag_run_state: DagRunState = DagRunState.QUEUED,
dry_run: bool = False,
session: Session = NEW_SESSION,
get_tis: bool = False,
recursion_depth: int = 0,
max_recursion_depth: int | None = None,
dag_bag: DagBag | None = None,
exclude_task_ids: frozenset[str] | frozenset[tuple[str, int]] | None = frozenset(),
) -> int | Iterable[TaskInstance]:
"""
Clears a set of task instances associated with the current dag for
a specified date range.
:param task_ids: List of task ids or (``task_id``, ``map_index``) tuples to clear
:param start_date: The minimum execution_date to clear
:param end_date: The maximum execution_date to clear
:param only_failed: Only clear failed tasks
:param only_running: Only clear running tasks.
:param confirm_prompt: Ask for confirmation
:param include_subdags: Clear tasks in subdags and clear external tasks
indicated by ExternalTaskMarker
:param include_parentdag: Clear tasks in the parent dag of the subdag.
:param dag_run_state: state to set DagRun to. If set to False, dagrun state will not
be changed.
:param dry_run: Find the tasks to clear but don't clear them.
:param session: The sqlalchemy session to use
        :param dag_bag: The DagBag used to find the DAG's subdags (Optional)
:param exclude_task_ids: A set of ``task_id`` or (``task_id``, ``map_index``)
tuples that should not be cleared
"""
if get_tis:
warnings.warn(
"Passing `get_tis` to dag.clear() is deprecated. Use `dry_run` parameter instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
dry_run = True
if recursion_depth:
warnings.warn(
"Passing `recursion_depth` to dag.clear() is deprecated.",
RemovedInAirflow3Warning,
stacklevel=2,
)
if max_recursion_depth:
warnings.warn(
"Passing `max_recursion_depth` to dag.clear() is deprecated.",
RemovedInAirflow3Warning,
stacklevel=2,
)
state: list[TaskInstanceState] = []
if only_failed:
state += [TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED]
if only_running:
# Yes, having `+=` doesn't make sense, but this was the existing behaviour
state += [TaskInstanceState.RUNNING]
tis = self._get_task_instances(
task_ids=task_ids,
start_date=start_date,
end_date=end_date,
run_id=None,
state=state,
include_subdags=include_subdags,
include_parentdag=include_parentdag,
include_dependent_dags=include_subdags, # compat, yes this is not a typo
session=session,
dag_bag=dag_bag,
exclude_task_ids=exclude_task_ids,
)
if dry_run:
return session.scalars(tis).all()
tis = session.scalars(tis).all()
count = len(list(tis))
do_it = True
if count == 0:
return 0
if confirm_prompt:
ti_list = "\n".join(str(t) for t in tis)
question = (
"You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? [y/n]"
).format(count=count, ti_list=ti_list)
do_it = utils.helpers.ask_yesno(question)
if do_it:
clear_task_instances(
list(tis),
session,
dag=self,
dag_run_state=dag_run_state,
)
else:
count = 0
print("Cancelled, nothing was cleared.")
session.flush()
return count
@classmethod
def clear_dags(
cls,
dags,
start_date=None,
end_date=None,
only_failed=False,
only_running=False,
confirm_prompt=False,
include_subdags=True,
include_parentdag=False,
dag_run_state=DagRunState.QUEUED,
dry_run=False,
):
all_tis = []
for dag in dags:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
only_failed=only_failed,
only_running=only_running,
confirm_prompt=False,
include_subdags=include_subdags,
include_parentdag=include_parentdag,
dag_run_state=dag_run_state,
dry_run=True,
)
all_tis.extend(tis)
if dry_run:
return all_tis
count = len(all_tis)
do_it = True
if count == 0:
print("Nothing to clear.")
return 0
if confirm_prompt:
ti_list = "\n".join(str(t) for t in all_tis)
question = f"You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? [y/n]"
do_it = utils.helpers.ask_yesno(question)
if do_it:
for dag in dags:
dag.clear(
start_date=start_date,
end_date=end_date,
only_failed=only_failed,
only_running=only_running,
confirm_prompt=False,
include_subdags=include_subdags,
dag_run_state=dag_run_state,
dry_run=False,
)
else:
count = 0
print("Cancelled, nothing was cleared.")
return count
def __deepcopy__(self, memo):
# Switcharoo to go around deepcopying objects coming through the
# backdoor
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ("user_defined_macros", "user_defined_filters", "_log"):
setattr(result, k, copy.deepcopy(v, memo))
result.user_defined_macros = self.user_defined_macros
result.user_defined_filters = self.user_defined_filters
if hasattr(self, "_log"):
result._log = self._log
return result
def sub_dag(self, *args, **kwargs):
"""This method is deprecated in favor of partial_subset."""
warnings.warn(
"This method is deprecated and will be removed in a future version. Please use partial_subset",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.partial_subset(*args, **kwargs)
def partial_subset(
self,
task_ids_or_regex: str | Pattern | Iterable[str],
include_downstream=False,
include_upstream=True,
include_direct_upstream=False,
):
"""
        Returns a subset of the current dag as a deep copy, based on a regex that
        should match one or many tasks, and includes upstream and downstream
        neighbours based on the flags passed.
:param task_ids_or_regex: Either a list of task_ids, or a regex to
match against task ids (as a string, or compiled regex pattern).
:param include_downstream: Include all downstream tasks of matched
tasks, in addition to matched tasks.
:param include_upstream: Include all upstream tasks of matched tasks,
in addition to matched tasks.
:param include_direct_upstream: Include all tasks directly upstream of matched
and downstream (if include_downstream = True) tasks
"""
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
# deep-copying self.task_dict and self._task_group takes a long time, and we don't want all
# the tasks anyway, so we copy the tasks manually later
memo = {id(self.task_dict): None, id(self._task_group): None}
dag = copy.deepcopy(self, memo) # type: ignore
if isinstance(task_ids_or_regex, (str, Pattern)):
matched_tasks = [t for t in self.tasks if re2.findall(task_ids_or_regex, t.task_id)]
else:
matched_tasks = [t for t in self.tasks if t.task_id in task_ids_or_regex]
also_include: list[Operator] = []
for t in matched_tasks:
if include_downstream:
also_include.extend(t.get_flat_relatives(upstream=False))
if include_upstream:
also_include.extend(t.get_upstreams_follow_setups())
else:
also_include.extend(t.get_upstreams_only_setups_and_teardowns())
if t.is_setup and not include_downstream:
also_include.extend(x for x in t.downstream_list if x.is_teardown)
direct_upstreams: list[Operator] = []
if include_direct_upstream:
for t in itertools.chain(matched_tasks, also_include):
upstream = (u for u in t.upstream_list if isinstance(u, (BaseOperator, MappedOperator)))
direct_upstreams.extend(upstream)
# Compiling the unique list of tasks that made the cut
# Make sure to not recursively deepcopy the dag or task_group while copying the task.
# task_group is reset later
def _deepcopy_task(t) -> Operator:
memo.setdefault(id(t.task_group), None)
return copy.deepcopy(t, memo)
dag.task_dict = {
t.task_id: _deepcopy_task(t)
for t in itertools.chain(matched_tasks, also_include, direct_upstreams)
}
def filter_task_group(group, parent_group):
"""Exclude tasks not included in the subdag from the given TaskGroup."""
# We want to deepcopy _most but not all_ attributes of the task group, so we create a shallow copy
# and then manually deep copy the instances. (memo argument to deepcopy only works for instances
# of classes, not "native" properties of an instance)
copied = copy.copy(group)
memo[id(group.children)] = {}
if parent_group:
memo[id(group.parent_group)] = parent_group
for attr, value in copied.__dict__.items():
if id(value) in memo:
value = memo[id(value)]
else:
value = copy.deepcopy(value, memo)
copied.__dict__[attr] = value
proxy = weakref.proxy(copied)
for child in group.children.values():
if isinstance(child, AbstractOperator):
if child.task_id in dag.task_dict:
task = copied.children[child.task_id] = dag.task_dict[child.task_id]
task.task_group = proxy
else:
copied.used_group_ids.discard(child.task_id)
else:
filtered_child = filter_task_group(child, proxy)
# Only include this child TaskGroup if it is non-empty.
if filtered_child.children:
copied.children[child.group_id] = filtered_child
return copied
dag._task_group = filter_task_group(self.task_group, None)
# Removing upstream/downstream references to tasks and TaskGroups that did not make
# the cut.
subdag_task_groups = dag.task_group.get_task_group_dict()
for group in subdag_task_groups.values():
group.upstream_group_ids.intersection_update(subdag_task_groups)
group.downstream_group_ids.intersection_update(subdag_task_groups)
group.upstream_task_ids.intersection_update(dag.task_dict)
group.downstream_task_ids.intersection_update(dag.task_dict)
for t in dag.tasks:
# Removing upstream/downstream references to tasks that did not
# make the cut
t.upstream_task_ids.intersection_update(dag.task_dict)
t.downstream_task_ids.intersection_update(dag.task_dict)
if len(dag.tasks) < len(self.tasks):
dag.partial = True
return dag
def has_task(self, task_id: str):
return task_id in self.task_dict
def has_task_group(self, task_group_id: str) -> bool:
return task_group_id in self.task_group_dict
@functools.cached_property
def task_group_dict(self):
return {k: v for k, v in self._task_group.get_task_group_dict().items() if k is not None}
def get_task(self, task_id: str, include_subdags: bool = False) -> Operator:
if task_id in self.task_dict:
return self.task_dict[task_id]
if include_subdags:
for dag in self.subdags:
if task_id in dag.task_dict:
return dag.task_dict[task_id]
raise TaskNotFound(f"Task {task_id} not found")
def pickle_info(self):
d = {}
d["is_picklable"] = True
try:
dttm = timezone.utcnow()
pickled = pickle.dumps(self)
d["pickle_len"] = len(pickled)
d["pickling_duration"] = str(timezone.utcnow() - dttm)
except Exception as e:
self.log.debug(e)
d["is_picklable"] = False
d["stacktrace"] = traceback.format_exc()
return d
@provide_session
def pickle(self, session=NEW_SESSION) -> DagPickle:
dag = session.scalar(select(DagModel).where(DagModel.dag_id == self.dag_id).limit(1))
dp = None
if dag and dag.pickle_id:
dp = session.scalar(select(DagPickle).where(DagPickle.id == dag.pickle_id).limit(1))
if not dp or dp.pickle != self:
dp = DagPickle(dag=self)
session.add(dp)
self.last_pickled = timezone.utcnow()
session.commit()
self.pickle_id = dp.id
return dp
def tree_view(self) -> None:
"""Print an ASCII tree representation of the DAG."""
def get_downstream(task, level=0):
print((" " * level * 4) + str(task))
level += 1
for t in task.downstream_list:
get_downstream(t, level)
for t in self.roots:
get_downstream(t)
@property
def task(self) -> TaskDecoratorCollection:
from airflow.decorators import task
return cast("TaskDecoratorCollection", functools.partial(task, dag=self))
def add_task(self, task: Operator) -> None:
"""
Add a task to the DAG.
:param task: the task you want to add
"""
DagInvalidTriggerRule.check(self, task.trigger_rule)
from airflow.utils.task_group import TaskGroupContext
if not self.start_date and not task.start_date:
raise AirflowException("DAG is missing the start_date parameter")
# if the task has no start date, assign it the same as the DAG
elif not task.start_date:
task.start_date = self.start_date
# otherwise, the task will start on the later of its own start date and
# the DAG's start date
elif self.start_date:
task.start_date = max(task.start_date, self.start_date)
# if the task has no end date, assign it the same as the dag
if not task.end_date:
task.end_date = self.end_date
# otherwise, the task will end on the earlier of its own end date and
# the DAG's end date
elif task.end_date and self.end_date:
task.end_date = min(task.end_date, self.end_date)
task_id = task.task_id
if not task.task_group:
task_group = TaskGroupContext.get_current_task_group(self)
if task_group:
task_id = task_group.child_id(task_id)
task_group.add(task)
if (
task_id in self.task_dict and self.task_dict[task_id] is not task
) or task_id in self._task_group.used_group_ids:
raise DuplicateTaskIdFound(f"Task id '{task_id}' has already been added to the DAG")
else:
self.task_dict[task_id] = task
task.dag = self
# Add task_id to used_group_ids to prevent group_id and task_id collisions.
self._task_group.used_group_ids.add(task_id)
self.task_count = len(self.task_dict)
def add_tasks(self, tasks: Iterable[Operator]) -> None:
"""
Add a list of tasks to the DAG.
        :param tasks: a list of tasks you want to add
"""
for task in tasks:
self.add_task(task)
def _remove_task(self, task_id: str) -> None:
# This is "private" as removing could leave a hole in dependencies if done incorrectly, and this
# doesn't guard against that
task = self.task_dict.pop(task_id)
tg = getattr(task, "task_group", None)
if tg:
tg._remove(task)
self.task_count = len(self.task_dict)
def run(
self,
start_date=None,
end_date=None,
mark_success=False,
local=False,
executor=None,
donot_pickle=airflow_conf.getboolean("core", "donot_pickle"),
ignore_task_deps=False,
ignore_first_depends_on_past=True,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
run_backwards=False,
run_at_least_once=False,
continue_on_failures=False,
disable_retry=False,
):
"""
Runs the DAG.
:param start_date: the start date of the range to run
:param end_date: the end date of the range to run
:param mark_success: True to mark jobs as succeeded without running them
:param local: True to run the tasks using the LocalExecutor
:param executor: The executor instance to run the tasks
        :param donot_pickle: True to avoid pickling the DAG object and sending it to workers
:param ignore_task_deps: True to skip upstream tasks
:param ignore_first_depends_on_past: True to ignore depends_on_past
dependencies for the first set of tasks only
:param pool: Resource pool to use
:param delay_on_limit_secs: Time in seconds to wait before next attempt to run
dag run when max_active_runs limit has been reached
:param verbose: Make logging output more verbose
:param conf: user defined dictionary passed from CLI
        :param rerun_failed_tasks: if set, previously failed task instances in the range are cleared and
            rerun instead of raising an error
        :param run_backwards: if set, the dag runs in the range are executed starting from the most
            recent logical date first
:param run_at_least_once: If true, always run the DAG at least once even
if no logical run exists within the time range.
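        A hedged sketch of a local backfill over one day (dates and the ``pendulum`` import are illustrative)::
            import pendulum
            dag.run(
                start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
                end_date=pendulum.datetime(2023, 1, 2, tz="UTC"),
                local=True,
            )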
"""
from airflow.jobs.backfill_job_runner import BackfillJobRunner
if not executor and local:
from airflow.executors.local_executor import LocalExecutor
executor = LocalExecutor()
elif not executor:
from airflow.executors.executor_loader import ExecutorLoader
executor = ExecutorLoader.get_default_executor()
from airflow.jobs.job import Job
job = Job(executor=executor)
job_runner = BackfillJobRunner(
job=job,
dag=self,
start_date=start_date,
end_date=end_date,
mark_success=mark_success,
donot_pickle=donot_pickle,
ignore_task_deps=ignore_task_deps,
ignore_first_depends_on_past=ignore_first_depends_on_past,
pool=pool,
delay_on_limit_secs=delay_on_limit_secs,
verbose=verbose,
conf=conf,
rerun_failed_tasks=rerun_failed_tasks,
run_backwards=run_backwards,
run_at_least_once=run_at_least_once,
continue_on_failures=continue_on_failures,
disable_retry=disable_retry,
)
run_job(job=job, execute_callable=job_runner._execute)
def cli(self):
"""Exposes a CLI specific to this DAG."""
check_cycle(self)
from airflow.cli import cli_parser
parser = cli_parser.get_parser(dag_parser=True)
args = parser.parse_args()
args.func(args, self)
@provide_session
def test(
self,
execution_date: datetime | None = None,
run_conf: dict[str, Any] | None = None,
conn_file_path: str | None = None,
variable_file_path: str | None = None,
session: Session = NEW_SESSION,
) -> None:
"""
Execute one single DagRun for a given DAG and execution date.
:param execution_date: execution date for the DAG run
:param run_conf: configuration to pass to newly created dagrun
:param conn_file_path: file path to a connection file in either yaml or json
:param variable_file_path: file path to a variable file in either yaml or json
:param session: database connection (optional)
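        A minimal sketch (the ``run_conf`` contents are illustrative)::
            dag.test(run_conf={"my_param": 42})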
"""
def add_logger_if_needed(ti: TaskInstance):
"""Add a formatted logger to the task instance.
This allows all logs to surface to the command line, instead of into
a task file. Since this is a local test run, it is much better for
the user to see logs in the command line, rather than needing to
search for a log file.
:param ti: The task instance that will receive a logger.
"""
format = logging.Formatter("[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.level = logging.INFO
handler.setFormatter(format)
# only add log handler once
if not any(isinstance(h, logging.StreamHandler) for h in ti.log.handlers):
self.log.debug("Adding Streamhandler to taskinstance %s", ti.task_id)
ti.log.addHandler(handler)
if conn_file_path or variable_file_path:
local_secrets = LocalFilesystemBackend(
variables_file_path=variable_file_path, connections_file_path=conn_file_path
)
secrets_backend_list.insert(0, local_secrets)
execution_date = execution_date or timezone.utcnow()
self.log.debug("Clearing existing task instances for execution date %s", execution_date)
self.clear(
start_date=execution_date,
end_date=execution_date,
dag_run_state=False, # type: ignore
session=session,
)
self.log.debug("Getting dagrun for dag %s", self.dag_id)
logical_date = timezone.coerce_datetime(execution_date)
data_interval = self.timetable.infer_manual_data_interval(run_after=logical_date)
dr: DagRun = _get_or_create_dagrun(
dag=self,
start_date=execution_date,
execution_date=execution_date,
run_id=DagRun.generate_run_id(DagRunType.MANUAL, execution_date),
session=session,
conf=run_conf,
data_interval=data_interval,
)
tasks = self.task_dict
self.log.debug("starting dagrun")
# Instead of starting a scheduler, we run the minimal loop possible to check
# for task readiness and dependency management. This is notably faster
# than creating a BackfillJob and allows us to surface logs to the user
while dr.state == DagRunState.RUNNING:
schedulable_tis, _ = dr.update_state(session=session)
try:
for ti in schedulable_tis:
add_logger_if_needed(ti)
ti.task = tasks[ti.task_id]
_run_task(ti, session=session)
except Exception:
self.log.info(
"Task failed. DAG will continue to run until finished and be marked as failed.",
exc_info=True,
)
if conn_file_path or variable_file_path:
# Remove the local variables we have added to the secrets_backend_list
secrets_backend_list.pop(0)
@provide_session
def create_dagrun(
self,
state: DagRunState,
execution_date: datetime | None = None,
run_id: str | None = None,
start_date: datetime | None = None,
external_trigger: bool | None = False,
conf: dict | None = None,
run_type: DagRunType | None = None,
session: Session = NEW_SESSION,
dag_hash: str | None = None,
creating_job_id: int | None = None,
data_interval: tuple[datetime, datetime] | None = None,
):
"""
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
:param run_id: defines the run id for this dag run
:param run_type: type of DagRun
:param execution_date: the execution date of this dag run
:param state: the state of the dag run
:param start_date: the date this dag run should be evaluated
:param external_trigger: whether this dag run is externally triggered
:param conf: Dict containing configuration/parameters to pass to the DAG
:param creating_job_id: id of the job creating this DagRun
:param session: database session
:param dag_hash: Hash of Serialized DAG
:param data_interval: Data interval of the DagRun
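        A hedged sketch of creating a manual run (``DagRunState``/``DagRunType`` come from
        ``airflow.utils.state`` and ``airflow.utils.types``; values are illustrative)::
            run = dag.create_dagrun(
                state=DagRunState.QUEUED,
                execution_date=timezone.utcnow(),
                run_type=DagRunType.MANUAL,
                external_trigger=True,
            )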
"""
logical_date = timezone.coerce_datetime(execution_date)
if data_interval and not isinstance(data_interval, DataInterval):
data_interval = DataInterval(*map(timezone.coerce_datetime, data_interval))
if data_interval is None and logical_date is not None:
warnings.warn(
"Calling `DAG.create_dagrun()` without an explicit data interval is deprecated",
RemovedInAirflow3Warning,
stacklevel=3,
)
if run_type == DagRunType.MANUAL:
data_interval = self.timetable.infer_manual_data_interval(run_after=logical_date)
else:
data_interval = self.infer_automated_data_interval(logical_date)
if run_type is None or isinstance(run_type, DagRunType):
pass
elif isinstance(run_type, str): # Compatibility: run_type used to be a str.
run_type = DagRunType(run_type)
else:
raise ValueError(f"`run_type` should be a DagRunType, not {type(run_type)}")
if run_id: # Infer run_type from run_id if needed.
if not isinstance(run_id, str):
raise ValueError(f"`run_id` should be a str, not {type(run_id)}")
inferred_run_type = DagRunType.from_run_id(run_id)
if run_type is None:
# No explicit type given, use the inferred type.
run_type = inferred_run_type
elif run_type == DagRunType.MANUAL and inferred_run_type != DagRunType.MANUAL:
# Prevent a manual run from using an ID that looks like a scheduled run.
raise ValueError(
f"A {run_type.value} DAG run cannot use ID {run_id!r} since it "
f"is reserved for {inferred_run_type.value} runs"
)
elif run_type and logical_date is not None: # Generate run_id from run_type and execution_date.
run_id = self.timetable.generate_run_id(
run_type=run_type, logical_date=logical_date, data_interval=data_interval
)
else:
raise AirflowException(
"Creating DagRun needs either `run_id` or both `run_type` and `execution_date`"
)
regex = airflow_conf.get("scheduler", "allowed_run_id_pattern")
if run_id and not re2.match(RUN_ID_REGEX, run_id):
if not regex.strip() or not re2.match(regex.strip(), run_id):
raise AirflowException(
f"The provided run ID '{run_id}' is invalid. It does not match either "
f"the configured pattern: '{regex}' or the built-in pattern: '{RUN_ID_REGEX}'"
)
# create a copy of params before validating
copied_params = copy.deepcopy(self.params)
copied_params.update(conf or {})
copied_params.validate()
run = DagRun(
dag_id=self.dag_id,
run_id=run_id,
execution_date=logical_date,
start_date=start_date,
external_trigger=external_trigger,
conf=conf,
state=state,
run_type=run_type,
dag_hash=dag_hash,
creating_job_id=creating_job_id,
data_interval=data_interval,
)
session.add(run)
session.flush()
run.dag = self
# create the associated task instances
# state is None at the moment of creation
run.verify_integrity(session=session)
return run
@classmethod
@provide_session
def bulk_sync_to_db(
cls,
dags: Collection[DAG],
session=NEW_SESSION,
):
"""This method is deprecated in favor of bulk_write_to_db."""
warnings.warn(
"This method is deprecated and will be removed in a future version. Please use bulk_write_to_db",
RemovedInAirflow3Warning,
stacklevel=2,
)
return cls.bulk_write_to_db(dags=dags, session=session)
@classmethod
@provide_session
def bulk_write_to_db(
cls,
dags: Collection[DAG],
processor_subdir: str | None = None,
session=NEW_SESSION,
):
"""
Ensure the DagModel rows for the given dags are up-to-date in the dag table in the DB, including
calculated fields.
Note that this method can be called for both DAGs and SubDAGs. A SubDag is actually a SubDagOperator.
:param dags: the DAG objects to save to the DB
:return: None
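        A minimal sketch (``dag1`` and ``dag2`` are hypothetical DAG objects parsed elsewhere)::
            from airflow.utils.session import create_session
            with create_session() as session:
                DAG.bulk_write_to_db([dag1, dag2], session=session)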
"""
if not dags:
return
log.info("Sync %s DAGs", len(dags))
dag_by_ids = {dag.dag_id: dag for dag in dags}
dag_ids = set(dag_by_ids.keys())
query = (
select(DagModel)
.options(joinedload(DagModel.tags, innerjoin=False))
.where(DagModel.dag_id.in_(dag_ids))
.options(joinedload(DagModel.schedule_dataset_references))
.options(joinedload(DagModel.task_outlet_dataset_references))
)
query = with_row_locks(query, of=DagModel, session=session)
orm_dags: list[DagModel] = session.scalars(query).unique().all()
existing_dags = {orm_dag.dag_id: orm_dag for orm_dag in orm_dags}
missing_dag_ids = dag_ids.difference(existing_dags)
for missing_dag_id in missing_dag_ids:
orm_dag = DagModel(dag_id=missing_dag_id)
dag = dag_by_ids[missing_dag_id]
if dag.is_paused_upon_creation is not None:
orm_dag.is_paused = dag.is_paused_upon_creation
orm_dag.tags = []
log.info("Creating ORM DAG for %s", dag.dag_id)
session.add(orm_dag)
orm_dags.append(orm_dag)
most_recent_runs: dict[str, DagRun] = {}
num_active_runs: dict[str, int] = {}
# Skip these queries entirely if no DAGs can be scheduled to save time.
if any(dag.timetable.can_be_scheduled for dag in dags):
# Get the latest dag run for each existing dag as a single query (avoid n+1 query)
most_recent_subq = (
select(DagRun.dag_id, func.max(DagRun.execution_date).label("max_execution_date"))
.where(
DagRun.dag_id.in_(existing_dags),
or_(DagRun.run_type == DagRunType.BACKFILL_JOB, DagRun.run_type == DagRunType.SCHEDULED),
)
.group_by(DagRun.dag_id)
.subquery()
)
most_recent_runs_iter = session.scalars(
select(DagRun).where(
DagRun.dag_id == most_recent_subq.c.dag_id,
DagRun.execution_date == most_recent_subq.c.max_execution_date,
)
)
most_recent_runs = {run.dag_id: run for run in most_recent_runs_iter}
# Get number of active dagruns for all dags we are processing as a single query.
num_active_runs = DagRun.active_runs_of_dags(dag_ids=existing_dags, session=session)
filelocs = []
for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
dag = dag_by_ids[orm_dag.dag_id]
filelocs.append(dag.fileloc)
if dag.is_subdag:
orm_dag.is_subdag = True
orm_dag.fileloc = dag.parent_dag.fileloc # type: ignore
orm_dag.root_dag_id = dag.parent_dag.dag_id # type: ignore
orm_dag.owners = dag.parent_dag.owner # type: ignore
else:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.has_import_errors = False
orm_dag.last_parsed_time = timezone.utcnow()
orm_dag.default_view = dag.default_view
orm_dag.description = dag.description
orm_dag.max_active_tasks = dag.max_active_tasks
orm_dag.max_active_runs = dag.max_active_runs
orm_dag.has_task_concurrency_limits = any(
t.max_active_tis_per_dag is not None or t.max_active_tis_per_dagrun is not None
for t in dag.tasks
)
orm_dag.schedule_interval = dag.schedule_interval
orm_dag.timetable_description = dag.timetable.description
orm_dag.processor_subdir = processor_subdir
run: DagRun | None = most_recent_runs.get(dag.dag_id)
if run is None:
data_interval = None
else:
data_interval = dag.get_run_data_interval(run)
if num_active_runs.get(dag.dag_id, 0) >= orm_dag.max_active_runs:
orm_dag.next_dagrun_create_after = None
else:
orm_dag.calculate_dagrun_date_fields(dag, data_interval)
dag_tags = set(dag.tags or {})
orm_dag_tags = list(orm_dag.tags or [])
for orm_tag in orm_dag_tags:
if orm_tag.name not in dag_tags:
session.delete(orm_tag)
orm_dag.tags.remove(orm_tag)
orm_tag_names = {t.name for t in orm_dag_tags}
for dag_tag in dag_tags:
if dag_tag not in orm_tag_names:
dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
orm_dag.tags.append(dag_tag_orm)
session.add(dag_tag_orm)
orm_dag_links = orm_dag.dag_owner_links or []
for orm_dag_link in orm_dag_links:
if orm_dag_link not in dag.owner_links:
session.delete(orm_dag_link)
for owner_name, owner_link in dag.owner_links.items():
dag_owner_orm = DagOwnerAttributes(dag_id=dag.dag_id, owner=owner_name, link=owner_link)
session.add(dag_owner_orm)
DagCode.bulk_sync_to_db(filelocs, session=session)
from airflow.datasets import Dataset
from airflow.models.dataset import (
DagScheduleDatasetReference,
DatasetModel,
TaskOutletDatasetReference,
)
dag_references = collections.defaultdict(set)
outlet_references = collections.defaultdict(set)
# We can't use a set here as we want to preserve order
outlet_datasets: dict[Dataset, None] = {}
input_datasets: dict[Dataset, None] = {}
# here we go through dags and tasks to check for dataset references
        # if there are now none and previously there were some, we delete them
# if there are now *any*, we add them to the above data structures, and
# later we'll persist them to the database.
for dag in dags:
curr_orm_dag = existing_dags.get(dag.dag_id)
if not dag.dataset_triggers:
if curr_orm_dag and curr_orm_dag.schedule_dataset_references:
curr_orm_dag.schedule_dataset_references = []
for dataset in dag.dataset_triggers:
dag_references[dag.dag_id].add(dataset.uri)
input_datasets[DatasetModel.from_public(dataset)] = None
curr_outlet_references = curr_orm_dag and curr_orm_dag.task_outlet_dataset_references
for task in dag.tasks:
dataset_outlets = [x for x in task.outlets or [] if isinstance(x, Dataset)]
if not dataset_outlets:
if curr_outlet_references:
this_task_outlet_refs = [
x
for x in curr_outlet_references
if x.dag_id == dag.dag_id and x.task_id == task.task_id
]
for ref in this_task_outlet_refs:
curr_outlet_references.remove(ref)
for d in dataset_outlets:
outlet_references[(task.dag_id, task.task_id)].add(d.uri)
outlet_datasets[DatasetModel.from_public(d)] = None
all_datasets = outlet_datasets
all_datasets.update(input_datasets)
# store datasets
stored_datasets = {}
for dataset in all_datasets:
stored_dataset = session.scalar(
select(DatasetModel).where(DatasetModel.uri == dataset.uri).limit(1)
)
if stored_dataset:
# Some datasets may have been previously unreferenced, and therefore orphaned by the
# scheduler. But if we're here, then we have found that dataset again in our DAGs, which
# means that it is no longer an orphan, so set is_orphaned to False.
stored_dataset.is_orphaned = expression.false()
stored_datasets[stored_dataset.uri] = stored_dataset
else:
session.add(dataset)
stored_datasets[dataset.uri] = dataset
session.flush() # this is required to ensure each dataset has its PK loaded
del all_datasets
# reconcile dag-schedule-on-dataset references
for dag_id, uri_list in dag_references.items():
dag_refs_needed = {
DagScheduleDatasetReference(dataset_id=stored_datasets[uri].id, dag_id=dag_id)
for uri in uri_list
}
dag_refs_stored = set(
existing_dags.get(dag_id)
and existing_dags.get(dag_id).schedule_dataset_references # type: ignore
or []
)
dag_refs_to_add = {x for x in dag_refs_needed if x not in dag_refs_stored}
session.bulk_save_objects(dag_refs_to_add)
for obj in dag_refs_stored - dag_refs_needed:
session.delete(obj)
existing_task_outlet_refs_dict = collections.defaultdict(set)
for dag_id, orm_dag in existing_dags.items():
for todr in orm_dag.task_outlet_dataset_references:
existing_task_outlet_refs_dict[(dag_id, todr.task_id)].add(todr)
# reconcile task-outlet-dataset references
for (dag_id, task_id), uri_list in outlet_references.items():
task_refs_needed = {
TaskOutletDatasetReference(dataset_id=stored_datasets[uri].id, dag_id=dag_id, task_id=task_id)
for uri in uri_list
}
task_refs_stored = existing_task_outlet_refs_dict[(dag_id, task_id)]
task_refs_to_add = {x for x in task_refs_needed if x not in task_refs_stored}
session.bulk_save_objects(task_refs_to_add)
for obj in task_refs_stored - task_refs_needed:
session.delete(obj)
        # Issue SQL/finish "Unit of Work", but let @provide_session commit (or, if passed a session, let the
        # caller decide when to commit).
session.flush()
for dag in dags:
cls.bulk_write_to_db(dag.subdags, processor_subdir=processor_subdir, session=session)
@provide_session
def sync_to_db(self, processor_subdir: str | None = None, session=NEW_SESSION):
"""
Save attributes about this DAG to the DB. Note that this method
can be called for both DAGs and SubDAGs. A SubDag is actually a
SubDagOperator.
:return: None
"""
self.bulk_write_to_db([self], processor_subdir=processor_subdir, session=session)
def get_default_view(self):
"""This is only there for backward compatible jinja2 templates."""
if self.default_view is None:
return airflow_conf.get("webserver", "dag_default_view").lower()
else:
return self.default_view
@staticmethod
@provide_session
def deactivate_unknown_dags(active_dag_ids, session=NEW_SESSION):
"""
Given a list of known DAGs, deactivate any other DAGs that are
marked as active in the ORM.
:param active_dag_ids: list of DAG IDs that are active
:return: None
"""
if len(active_dag_ids) == 0:
return
for dag in session.scalars(select(DagModel).where(~DagModel.dag_id.in_(active_dag_ids))).all():
dag.is_active = False
session.merge(dag)
session.commit()
@staticmethod
@provide_session
def deactivate_stale_dags(expiration_date, session=NEW_SESSION):
"""
Deactivate any DAGs that were last touched by the scheduler before
the expiration date. These DAGs were likely deleted.
:param expiration_date: set inactive DAGs that were touched before this
time
:return: None
"""
for dag in session.scalars(
select(DagModel).where(DagModel.last_parsed_time < expiration_date, DagModel.is_active)
):
log.info(
"Deactivating DAG ID %s since it was last touched by the scheduler at %s",
dag.dag_id,
dag.last_parsed_time.isoformat(),
)
dag.is_active = False
session.merge(dag)
session.commit()
@staticmethod
@provide_session
def get_num_task_instances(dag_id, run_id=None, task_ids=None, states=None, session=NEW_SESSION) -> int:
"""
Returns the number of task instances in the given DAG.
:param session: ORM session
:param dag_id: ID of the DAG to get the task concurrency of
:param run_id: ID of the DAG run to get the task concurrency of
:param task_ids: A list of valid task IDs for the given DAG
:param states: A list of states to filter by if supplied
:return: The number of running tasks
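        For example, counting running task instances of a hypothetical DAG::
            from airflow.utils.state import TaskInstanceState
            running = DAG.get_num_task_instances("example_dag", states=[TaskInstanceState.RUNNING])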
"""
qry = select(func.count(TaskInstance.task_id)).where(
TaskInstance.dag_id == dag_id,
)
if run_id:
qry = qry.where(
TaskInstance.run_id == run_id,
)
if task_ids:
qry = qry.where(
TaskInstance.task_id.in_(task_ids),
)
if states:
if None in states:
if all(x is None for x in states):
qry = qry.where(TaskInstance.state.is_(None))
else:
not_none_states = [state for state in states if state]
qry = qry.where(
or_(TaskInstance.state.in_(not_none_states), TaskInstance.state.is_(None))
)
else:
qry = qry.where(TaskInstance.state.in_(states))
return session.scalar(qry)
@classmethod
def get_serialized_fields(cls):
"""Stringified DAGs and operators contain exactly these fields."""
if not cls.__serialized_fields:
exclusion_list = {
"parent_dag",
"schedule_dataset_references",
"task_outlet_dataset_references",
"_old_context_manager_dags",
"safe_dag_id",
"last_loaded",
"user_defined_filters",
"user_defined_macros",
"partial",
"params",
"_pickle_id",
"_log",
"task_dict",
"template_searchpath",
"sla_miss_callback",
"on_success_callback",
"on_failure_callback",
"template_undefined",
"jinja_environment_kwargs",
# has_on_*_callback are only stored if the value is True, as the default is False
"has_on_success_callback",
"has_on_failure_callback",
"auto_register",
"fail_stop",
}
cls.__serialized_fields = frozenset(vars(DAG(dag_id="test")).keys()) - exclusion_list
return cls.__serialized_fields
def get_edge_info(self, upstream_task_id: str, downstream_task_id: str) -> EdgeInfoType:
"""
Returns edge information for the given pair of tasks if present, and
an empty edge if there is no information.
"""
# Note - older serialized DAGs may not have edge_info being a dict at all
empty = cast(EdgeInfoType, {})
if self.edge_info:
return self.edge_info.get(upstream_task_id, {}).get(downstream_task_id, empty)
else:
return empty
def set_edge_info(self, upstream_task_id: str, downstream_task_id: str, info: EdgeInfoType):
"""
Sets the given edge information on the DAG. Note that this will overwrite,
rather than merge with, existing info.
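        For example, attaching a label to the edge between two hypothetical task ids::
            dag.set_edge_info("extract", "load", {"label": "raw rows"})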
"""
self.edge_info.setdefault(upstream_task_id, {})[downstream_task_id] = info
def validate_schedule_and_params(self):
"""
        Validate and raise an exception if any Params in the DAG neither have a default value nor
        allow ``null`` in their ``schema['type']`` list, while the DAG has a schedule which is not None.
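        For example, a scheduled DAG remains valid if a required param also allows ``null`` (a sketch;
        ``Param`` comes from ``airflow.models.param``)::
            DAG("example", schedule="@daily", params={"limit": Param(None, type=["null", "integer"])})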
"""
if not self.timetable.can_be_scheduled:
return
for k, v in self.params.items():
# As type can be an array, we would check if `null` is an allowed type or not
if not v.has_value and ("type" not in v.schema or "null" not in v.schema["type"]):
raise AirflowException(
"DAG Schedule must be None, if there are any required params without default values"
)
def iter_invalid_owner_links(self) -> Iterator[tuple[str, str]]:
"""Parses a given link, and verifies if it's a valid URL, or a 'mailto' link.
Returns an iterator of invalid (owner, link) pairs.
"""
for owner, link in self.owner_links.items():
result = urlsplit(link)
if result.scheme == "mailto":
# netloc is not existing for 'mailto' link, so we are checking that the path is parsed
if not result.path:
                    yield owner, link
elif not result.scheme or not result.netloc:
yield owner, link
class DagTag(Base):
"""A tag name per dag, to allow quick filtering in the DAG view."""
__tablename__ = "dag_tag"
name = Column(String(TAG_MAX_LEN), primary_key=True)
dag_id = Column(
StringID(),
ForeignKey("dag.dag_id", name="dag_tag_dag_id_fkey", ondelete="CASCADE"),
primary_key=True,
)
def __repr__(self):
return self.name
class DagOwnerAttributes(Base):
"""Table defining different owner attributes.
For example, a link for an owner that will be passed as a hyperlink to the
"DAGs" view.
"""
__tablename__ = "dag_owner_attributes"
dag_id = Column(
StringID(),
ForeignKey("dag.dag_id", name="dag.dag_id", ondelete="CASCADE"),
nullable=False,
primary_key=True,
)
owner = Column(String(500), primary_key=True, nullable=False)
link = Column(String(500), nullable=False)
def __repr__(self):
return f"<DagOwnerAttributes: dag_id={self.dag_id}, owner={self.owner}, link={self.link}>"
@classmethod
def get_all(cls, session) -> dict[str, dict[str, str]]:
dag_links: dict = collections.defaultdict(dict)
for obj in session.scalars(select(cls)):
dag_links[obj.dag_id].update({obj.owner: obj.link})
return dag_links
class DagModel(Base):
"""Table containing DAG properties."""
__tablename__ = "dag"
"""
These items are stored in the database for state related information
"""
dag_id = Column(StringID(), primary_key=True)
root_dag_id = Column(StringID())
# A DAG can be paused from the UI / DB
# Set this default value of is_paused based on a configuration value!
is_paused_at_creation = airflow_conf.getboolean("core", "dags_are_paused_at_creation")
is_paused = Column(Boolean, default=is_paused_at_creation)
# Whether the DAG is a subdag
is_subdag = Column(Boolean, default=False)
# Whether that DAG was seen on the last DagBag load
is_active = Column(Boolean, default=False)
# Last time the scheduler started
last_parsed_time = Column(UtcDateTime)
# Last time this DAG was pickled
last_pickled = Column(UtcDateTime)
# Time when the DAG last received a refresh signal
# (e.g. the DAG's "refresh" button was clicked in the web UI)
last_expired = Column(UtcDateTime)
# Whether (one of) the scheduler is scheduling this DAG at the moment
scheduler_lock = Column(Boolean)
# Foreign key to the latest pickle_id
pickle_id = Column(Integer)
# The location of the file containing the DAG object
# Note: Do not depend on fileloc pointing to a file; in the case of a
# packaged DAG, it will point to the subpath of the DAG within the
# associated zip.
fileloc = Column(String(2000))
# The base directory used by Dag Processor that parsed this dag.
processor_subdir = Column(String(2000), nullable=True)
# String representing the owners
owners = Column(String(2000))
# Description of the dag
description = Column(Text)
# Default view of the DAG inside the webserver
default_view = Column(String(25))
# Schedule interval
schedule_interval = Column(Interval)
# Timetable/Schedule Interval description
timetable_description = Column(String(1000), nullable=True)
# Tags for view filter
tags = relationship("DagTag", cascade="all, delete, delete-orphan", backref=backref("dag"))
# Dag owner links for DAGs view
dag_owner_links = relationship(
"DagOwnerAttributes", cascade="all, delete, delete-orphan", backref=backref("dag")
)
max_active_tasks = Column(Integer, nullable=False)
max_active_runs = Column(Integer, nullable=True)
has_task_concurrency_limits = Column(Boolean, nullable=False)
has_import_errors = Column(Boolean(), default=False, server_default="0")
# The logical date of the next dag run.
next_dagrun = Column(UtcDateTime)
# Must be either both NULL or both datetime.
next_dagrun_data_interval_start = Column(UtcDateTime)
next_dagrun_data_interval_end = Column(UtcDateTime)
# Earliest time at which this ``next_dagrun`` can be created.
next_dagrun_create_after = Column(UtcDateTime)
__table_args__ = (
Index("idx_root_dag_id", root_dag_id, unique=False),
Index("idx_next_dagrun_create_after", next_dagrun_create_after, unique=False),
)
parent_dag = relationship(
"DagModel", remote_side=[dag_id], primaryjoin=root_dag_id == dag_id, foreign_keys=[root_dag_id]
)
schedule_dataset_references = relationship(
"DagScheduleDatasetReference",
cascade="all, delete, delete-orphan",
)
schedule_datasets = association_proxy("schedule_dataset_references", "dataset")
task_outlet_dataset_references = relationship(
"TaskOutletDatasetReference",
cascade="all, delete, delete-orphan",
)
NUM_DAGS_PER_DAGRUN_QUERY = airflow_conf.getint(
"scheduler", "max_dagruns_to_create_per_loop", fallback=10
)
def __init__(self, concurrency=None, **kwargs):
super().__init__(**kwargs)
if self.max_active_tasks is None:
if concurrency:
warnings.warn(
"The 'DagModel.concurrency' parameter is deprecated. Please use 'max_active_tasks'.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.max_active_tasks = concurrency
else:
self.max_active_tasks = airflow_conf.getint("core", "max_active_tasks_per_dag")
if self.max_active_runs is None:
self.max_active_runs = airflow_conf.getint("core", "max_active_runs_per_dag")
if self.has_task_concurrency_limits is None:
# Be safe -- this will be updated later once the DAG is parsed
self.has_task_concurrency_limits = True
def __repr__(self):
return f"<DAG: {self.dag_id}>"
@property
def next_dagrun_data_interval(self) -> DataInterval | None:
return _get_model_data_interval(
self,
"next_dagrun_data_interval_start",
"next_dagrun_data_interval_end",
)
@next_dagrun_data_interval.setter
def next_dagrun_data_interval(self, value: tuple[datetime, datetime] | None) -> None:
if value is None:
self.next_dagrun_data_interval_start = self.next_dagrun_data_interval_end = None
else:
self.next_dagrun_data_interval_start, self.next_dagrun_data_interval_end = value
@property
def timezone(self):
return settings.TIMEZONE
@staticmethod
@provide_session
def get_dagmodel(dag_id: str, session: Session = NEW_SESSION) -> DagModel | None:
return session.get(
DagModel,
dag_id,
options=[joinedload(DagModel.parent_dag)],
)
@classmethod
@provide_session
def get_current(cls, dag_id, session=NEW_SESSION):
return session.scalar(select(cls).where(cls.dag_id == dag_id))
@provide_session
def get_last_dagrun(self, session=NEW_SESSION, include_externally_triggered=False):
return get_last_dagrun(
self.dag_id, session=session, include_externally_triggered=include_externally_triggered
)
def get_is_paused(self, *, session: Session | None = None) -> bool:
"""Provide interface compatibility to 'DAG'."""
return self.is_paused
@staticmethod
@internal_api_call
@provide_session
def get_paused_dag_ids(dag_ids: list[str], session: Session = NEW_SESSION) -> set[str]:
"""
Given a list of dag_ids, get a set of Paused Dag Ids.
:param dag_ids: List of Dag ids
:param session: ORM Session
:return: Paused Dag_ids
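        For example (the dag ids are hypothetical)::
            paused = DagModel.get_paused_dag_ids(["example_dag_1", "example_dag_2"])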
"""
paused_dag_ids = session.execute(
select(DagModel.dag_id)
.where(DagModel.is_paused == expression.true())
.where(DagModel.dag_id.in_(dag_ids))
)
paused_dag_ids = {paused_dag_id for paused_dag_id, in paused_dag_ids}
return paused_dag_ids
def get_default_view(self) -> str:
"""
        Get the default DAG view; return the config default if this DagModel does not
        have a value set.
"""
# This is for backwards-compatibility with old dags that don't have None as default_view
return self.default_view or airflow_conf.get_mandatory_value("webserver", "dag_default_view").lower()
@property
def safe_dag_id(self):
return self.dag_id.replace(".", "__dot__")
@property
def relative_fileloc(self) -> pathlib.Path | None:
"""File location of the importable dag 'file' relative to the configured DAGs folder."""
if self.fileloc is None:
return None
path = pathlib.Path(self.fileloc)
try:
return path.relative_to(settings.DAGS_FOLDER)
except ValueError:
# Not relative to DAGS_FOLDER.
return path
@provide_session
def set_is_paused(self, is_paused: bool, including_subdags: bool = True, session=NEW_SESSION) -> None:
"""
Pause/Un-pause a DAG.
:param is_paused: Is the DAG paused
:param including_subdags: whether to include the DAG's subdags
:param session: session
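        For example, pausing a hypothetical DAG together with its subdags (assuming its row exists)::
            DagModel.get_dagmodel("example_dag").set_is_paused(True)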
"""
filter_query = [
DagModel.dag_id == self.dag_id,
]
if including_subdags:
filter_query.append(DagModel.root_dag_id == self.dag_id)
session.execute(
update(DagModel)
.where(or_(*filter_query))
.values(is_paused=is_paused)
.execution_options(synchronize_session="fetch")
)
session.commit()
@classmethod
@internal_api_call
@provide_session
def deactivate_deleted_dags(
cls,
alive_dag_filelocs: Container[str],
session: Session = NEW_SESSION,
) -> None:
"""
Set ``is_active=False`` on the DAGs for which the DAG files have been removed.
:param alive_dag_filelocs: file paths of alive DAGs
:param session: ORM Session
"""
log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__)
dag_models = session.scalars(select(cls).where(cls.fileloc.is_not(None)))
for dag_model in dag_models:
if dag_model.fileloc not in alive_dag_filelocs:
dag_model.is_active = False
@classmethod
def dags_needing_dagruns(cls, session: Session) -> tuple[Query, dict[str, tuple[datetime, datetime]]]:
"""
Return (and lock) a list of Dag objects that are due to create a new DagRun.
        This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query, so
        you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
        transaction is committed it will be unlocked.
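        A hedged sketch of consuming the result inside one transaction (``session`` is an open ORM session)::
            query, dataset_info = DagModel.dags_needing_dagruns(session)
            for dag_model in query:
                ...  # create DagRuns here, then commit once at the end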
"""
from airflow.models.dataset import DagScheduleDatasetReference, DatasetDagRunQueue as DDRQ
# these dag ids are triggered by datasets, and they are ready to go.
dataset_triggered_dag_info = {
x.dag_id: (x.first_queued_time, x.last_queued_time)
for x in session.execute(
select(
DagScheduleDatasetReference.dag_id,
func.max(DDRQ.created_at).label("last_queued_time"),
func.min(DDRQ.created_at).label("first_queued_time"),
)
.join(DagScheduleDatasetReference.queue_records, isouter=True)
.group_by(DagScheduleDatasetReference.dag_id)
.having(func.count() == func.sum(case((DDRQ.target_dag_id.is_not(None), 1), else_=0)))
)
}
dataset_triggered_dag_ids = set(dataset_triggered_dag_info.keys())
if dataset_triggered_dag_ids:
exclusion_list = {
x
for x in (
session.scalars(
select(DagModel.dag_id)
.join(DagRun.dag_model)
.where(DagRun.state.in_((DagRunState.QUEUED, DagRunState.RUNNING)))
.where(DagModel.dag_id.in_(dataset_triggered_dag_ids))
.group_by(DagModel.dag_id)
.having(func.count() >= func.max(DagModel.max_active_runs))
)
)
}
if exclusion_list:
dataset_triggered_dag_ids -= exclusion_list
dataset_triggered_dag_info = {
k: v for k, v in dataset_triggered_dag_info.items() if k not in exclusion_list
}
# We limit so that _one_ scheduler doesn't try to do all the creation of dag runs
query = (
select(cls)
.where(
cls.is_paused == expression.false(),
cls.is_active == expression.true(),
cls.has_import_errors == expression.false(),
or_(
cls.next_dagrun_create_after <= func.now(),
cls.dag_id.in_(dataset_triggered_dag_ids),
),
)
.order_by(cls.next_dagrun_create_after)
.limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
)
return (
session.scalars(with_row_locks(query, of=cls, session=session, **skip_locked(session=session))),
dataset_triggered_dag_info,
)
def calculate_dagrun_date_fields(
self,
dag: DAG,
most_recent_dag_run: None | datetime | DataInterval,
) -> None:
"""
        Calculate ``next_dagrun`` and ``next_dagrun_create_after``.
:param dag: The DAG object
:param most_recent_dag_run: DataInterval (or datetime) of most recent run of this dag, or none
if not yet scheduled.
"""
most_recent_data_interval: DataInterval | None
if isinstance(most_recent_dag_run, datetime):
warnings.warn(
"Passing a datetime to `DagModel.calculate_dagrun_date_fields` is deprecated. "
"Provide a data interval instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
most_recent_data_interval = dag.infer_automated_data_interval(most_recent_dag_run)
else:
most_recent_data_interval = most_recent_dag_run
next_dagrun_info = dag.next_dagrun_info(most_recent_data_interval)
if next_dagrun_info is None:
self.next_dagrun_data_interval = self.next_dagrun = self.next_dagrun_create_after = None
else:
self.next_dagrun_data_interval = next_dagrun_info.data_interval
self.next_dagrun = next_dagrun_info.logical_date
self.next_dagrun_create_after = next_dagrun_info.run_after
log.info(
"Setting next_dagrun for %s to %s, run_after=%s",
dag.dag_id,
self.next_dagrun,
self.next_dagrun_create_after,
)
@provide_session
def get_dataset_triggered_next_run_info(self, *, session=NEW_SESSION) -> dict[str, int | str] | None:
if self.schedule_interval != "Dataset":
return None
return get_dataset_triggered_next_run_info([self.dag_id], session=session)[self.dag_id]
# NOTE: Please keep the list of arguments in sync with DAG.__init__.
# Only exception: dag_id here should have a default value, but not in DAG.
def dag(
dag_id: str = "",
description: str | None = None,
schedule: ScheduleArg = NOTSET,
schedule_interval: ScheduleIntervalArg = NOTSET,
timetable: Timetable | None = None,
start_date: datetime | None = None,
end_date: datetime | None = None,
full_filepath: str | None = None,
template_searchpath: str | Iterable[str] | None = None,
template_undefined: type[jinja2.StrictUndefined] = jinja2.StrictUndefined,
user_defined_macros: dict | None = None,
user_defined_filters: dict | None = None,
default_args: dict | None = None,
concurrency: int | None = None,
max_active_tasks: int = airflow_conf.getint("core", "max_active_tasks_per_dag"),
max_active_runs: int = airflow_conf.getint("core", "max_active_runs_per_dag"),
dagrun_timeout: timedelta | None = None,
sla_miss_callback: None | SLAMissCallback | list[SLAMissCallback] = None,
default_view: str = airflow_conf.get_mandatory_value("webserver", "dag_default_view").lower(),
orientation: str = airflow_conf.get_mandatory_value("webserver", "dag_orientation"),
catchup: bool = airflow_conf.getboolean("scheduler", "catchup_by_default"),
on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None,
doc_md: str | None = None,
params: collections.abc.MutableMapping | None = None,
access_control: dict | None = None,
is_paused_upon_creation: bool | None = None,
jinja_environment_kwargs: dict | None = None,
render_template_as_native_obj: bool = False,
tags: list[str] | None = None,
owner_links: dict[str, str] | None = None,
auto_register: bool = True,
fail_stop: bool = False,
) -> Callable[[Callable], Callable[..., DAG]]:
"""
    Python DAG decorator that wraps a function into an Airflow DAG.
    All keyword arguments mirror ``DAG.__init__`` (with ``dag_id`` defaulting to the decorated function's
    name) and can be used to parameterize the generated DAG.
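    A minimal illustrative sketch (names and dates are hypothetical)::
        from datetime import datetime
        @dag(schedule=None, start_date=datetime(2023, 1, 1), catchup=False)
        def example_pipeline():
            ...
        example_pipeline()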
"""
def wrapper(f: Callable) -> Callable[..., DAG]:
@functools.wraps(f)
def factory(*args, **kwargs):
# Generate signature for decorated function and bind the arguments when called
# we do this to extract parameters, so we can annotate them on the DAG object.
# In addition, this fails if we are missing any args/kwargs with TypeError as expected.
f_sig = signature(f).bind(*args, **kwargs)
# Apply defaults to capture default values if set.
f_sig.apply_defaults()
# Initialize DAG with bound arguments
with DAG(
dag_id or f.__name__,
description=description,
schedule_interval=schedule_interval,
timetable=timetable,
start_date=start_date,
end_date=end_date,
full_filepath=full_filepath,
template_searchpath=template_searchpath,
template_undefined=template_undefined,
user_defined_macros=user_defined_macros,
user_defined_filters=user_defined_filters,
default_args=default_args,
concurrency=concurrency,
max_active_tasks=max_active_tasks,
max_active_runs=max_active_runs,
dagrun_timeout=dagrun_timeout,
sla_miss_callback=sla_miss_callback,
default_view=default_view,
orientation=orientation,
catchup=catchup,
on_success_callback=on_success_callback,
on_failure_callback=on_failure_callback,
doc_md=doc_md,
params=params,
access_control=access_control,
is_paused_upon_creation=is_paused_upon_creation,
jinja_environment_kwargs=jinja_environment_kwargs,
render_template_as_native_obj=render_template_as_native_obj,
tags=tags,
schedule=schedule,
owner_links=owner_links,
auto_register=auto_register,
fail_stop=fail_stop,
) as dag_obj:
# Set DAG documentation from function documentation if it exists and doc_md is not set.
if f.__doc__ and not dag_obj.doc_md:
dag_obj.doc_md = f.__doc__
# Generate DAGParam for each function arg/kwarg and replace it for calling the function.
# All args/kwargs for function will be DAGParam object and replaced on execution time.
f_kwargs = {}
for name, value in f_sig.arguments.items():
f_kwargs[name] = dag_obj.param(name, value)
# set file location to caller source path
back = sys._getframe().f_back
dag_obj.fileloc = back.f_code.co_filename if back else ""
# Invoke function to create operators in the DAG scope.
f(**f_kwargs)
# Return dag object such that it's accessible in Globals.
return dag_obj
# Ensure that warnings from inside DAG() are emitted from the caller, not here
fixup_decorator_warning_stack(factory)
return factory
return wrapper
STATICA_HACK = True
globals()["kcah_acitats"[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
from airflow.models.serialized_dag import SerializedDagModel
DagModel.serialized_dag = relationship(SerializedDagModel)
""":sphinx-autoapi-skip:"""
class DagContext:
"""
DAG context is used to keep the current DAG when DAG is used as ContextManager.
You can use DAG as context:
.. code-block:: python
with DAG(
dag_id="example_dag",
default_args=default_args,
schedule="0 0 * * *",
dagrun_timeout=timedelta(minutes=60),
) as dag:
...
If you do this the context stores the DAG and whenever new task is created, it will use
such stored DAG as the parent DAG.
"""
_context_managed_dags: collections.deque[DAG] = deque()
autoregistered_dags: set[tuple[DAG, ModuleType]] = set()
current_autoregister_module_name: str | None = None
@classmethod
def push_context_managed_dag(cls, dag: DAG):
cls._context_managed_dags.appendleft(dag)
@classmethod
def pop_context_managed_dag(cls) -> DAG | None:
dag = cls._context_managed_dags.popleft()
        # In a few cases around serialization we explicitly push None into the stack
if cls.current_autoregister_module_name is not None and dag and dag.auto_register:
mod = sys.modules[cls.current_autoregister_module_name]
cls.autoregistered_dags.add((dag, mod))
return dag
@classmethod
def get_current_dag(cls) -> DAG | None:
try:
return cls._context_managed_dags[0]
except IndexError:
return None
def _run_task(ti: TaskInstance, session):
"""
    Run a single task instance, and push the result to XCom for downstream tasks. Bypasses a lot of
    extra steps used in `task.run` to keep our local running as fast as possible.
    This function is only meant for the `dag.test` function as a helper function.
Args:
ti: TaskInstance to run
"""
log.info("*****************************************************")
if ti.map_index > 0:
log.info("Running task %s index %d", ti.task_id, ti.map_index)
else:
log.info("Running task %s", ti.task_id)
try:
ti._run_raw_task(session=session)
session.flush()
log.info("%s ran successfully!", ti.task_id)
except AirflowSkipException:
log.info("Task Skipped, continuing")
log.info("*****************************************************")
def _get_or_create_dagrun(
dag: DAG,
conf: dict[Any, Any] | None,
start_date: datetime,
execution_date: datetime,
run_id: str,
session: Session,
data_interval: tuple[datetime, datetime] | None = None,
) -> DagRun:
"""Create a DAG run, replacing an existing instance if needed to prevent collisions.
This function is only meant to be used by :meth:`DAG.test` as a helper function.
:param dag: DAG to be used to find run.
:param conf: Configuration to pass to newly created run.
:param start_date: Start date of new run.
:param execution_date: Logical date for finding an existing run.
:param run_id: Run ID for the new DAG run.
:return: The newly created DAG run.
"""
log.info("dagrun id: %s", dag.dag_id)
dr: DagRun = session.scalar(
select(DagRun).where(DagRun.dag_id == dag.dag_id, DagRun.execution_date == execution_date)
)
if dr:
session.delete(dr)
session.commit()
dr = dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=execution_date,
run_id=run_id,
start_date=start_date or execution_date,
session=session,
conf=conf,
data_interval=data_interval,
)
log.info("created dagrun %s", dr)
return dr
| 159,605 | 39.550305 | 110 |
py
|
airflow
|
airflow-main/airflow/models/dagparam.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.models.param`."""
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.models.param import DagParam # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.models.param`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,152 | 37.433333 | 72 |
py
|
airflow
|
airflow-main/airflow/models/xcom_arg.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import inspect
from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Mapping, Sequence, Union, overload
from sqlalchemy import func, or_
from sqlalchemy.orm import Session
from airflow.exceptions import AirflowException, XComNotFound
from airflow.models.abstractoperator import AbstractOperator
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
from airflow.models.taskmixin import DAGNode, DependencyMixin
from airflow.utils.context import Context
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.mixins import ResolveMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.setup_teardown import SetupTeardownContext
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET, ArgNotSet
from airflow.utils.xcom import XCOM_RETURN_KEY
if TYPE_CHECKING:
from airflow.models.dag import DAG
from airflow.models.operator import Operator
from airflow.utils.task_group import TaskGroup
# Callable objects contained by MapXComArg. We only accept callables from
# the user, but deserialize them into strings in a serialized XComArg for
# safety (those callables are arbitrary user code).
MapCallables = Sequence[Union[Callable[[Any], Any], str]]
class XComArg(ResolveMixin, DependencyMixin):
"""Reference to an XCom value pushed from another operator.
The implementation supports::
xcomarg >> op
xcomarg << op
op >> xcomarg # By BaseOperator code
op << xcomarg # By BaseOperator code
**Example**: The moment you get a result from any operator (decorated or regular) you can ::
any_op = AnyOperator()
xcomarg = XComArg(any_op)
# or equivalently
xcomarg = any_op.output
my_op = MyOperator()
my_op >> xcomarg
This object can be used in legacy Operators via Jinja.
    **Example**: You can make this result part of any generated string::
any_op = AnyOperator()
xcomarg = any_op.output
op1 = MyOperator(my_text_message=f"the value is {xcomarg}")
op2 = MyOperator(my_text_message=f"the value is {xcomarg['topic']}")
:param operator: Operator instance to which the XComArg references.
:param key: Key used to pull the XCom value. Defaults to *XCOM_RETURN_KEY*,
i.e. the referenced operator's return value.
"""
@overload
def __new__(cls: type[XComArg], operator: Operator, key: str = XCOM_RETURN_KEY) -> XComArg:
"""Called when the user writes ``XComArg(...)`` directly."""
@overload
def __new__(cls: type[XComArg]) -> XComArg:
"""Called by Python internals from subclasses."""
def __new__(cls, *args, **kwargs) -> XComArg:
if cls is XComArg:
return PlainXComArg(*args, **kwargs)
return super().__new__(cls)
@staticmethod
def iter_xcom_references(arg: Any) -> Iterator[tuple[Operator, str]]:
"""Return XCom references in an arbitrary value.
Recursively traverse ``arg`` and look for XComArg instances in any
collection objects, and instances with ``template_fields`` set.
"""
if isinstance(arg, ResolveMixin):
yield from arg.iter_references()
elif isinstance(arg, (tuple, set, list)):
for elem in arg:
yield from XComArg.iter_xcom_references(elem)
elif isinstance(arg, dict):
for elem in arg.values():
yield from XComArg.iter_xcom_references(elem)
elif isinstance(arg, AbstractOperator):
for attr in arg.template_fields:
yield from XComArg.iter_xcom_references(getattr(arg, attr))
@staticmethod
def apply_upstream_relationship(op: Operator, arg: Any):
"""Set dependency for XComArgs.
This looks for XComArg objects in ``arg`` "deeply" (looking inside
collections objects and classes decorated with ``template_fields``), and
sets the relationship to ``op`` on any found.
"""
for operator, _ in XComArg.iter_xcom_references(arg):
op.set_upstream(operator)
@property
def roots(self) -> list[DAGNode]:
"""Required by TaskMixin."""
return [op for op, _ in self.iter_references()]
@property
def leaves(self) -> list[DAGNode]:
"""Required by TaskMixin."""
return [op for op, _ in self.iter_references()]
def set_upstream(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""Proxy to underlying operator set_upstream method. Required by TaskMixin."""
for operator, _ in self.iter_references():
operator.set_upstream(task_or_task_list, edge_modifier)
def set_downstream(
self,
task_or_task_list: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""Proxy to underlying operator set_downstream method. Required by TaskMixin."""
for operator, _ in self.iter_references():
operator.set_downstream(task_or_task_list, edge_modifier)
def _serialize(self) -> dict[str, Any]:
"""Called by DAG serialization.
The implementation should be the inverse function to ``deserialize``,
returning a data dict converted from this XComArg derivative. DAG
serialization does not call this directly, but ``serialize_xcom_arg``
instead, which adds additional information to dispatch deserialization
to the correct class.
"""
raise NotImplementedError()
@classmethod
def _deserialize(cls, data: dict[str, Any], dag: DAG) -> XComArg:
"""Called when deserializing a DAG.
The implementation should be the inverse function to ``serialize``: given
a data dict converted from this XComArg derivative, it re-creates the
original XComArg. DAG serialization relies on
additional information added in ``serialize_xcom_arg`` to dispatch data
dicts to the correct ``_deserialize`` implementation, so this function does
not need to validate whether the incoming data contains correct keys.
"""
raise NotImplementedError()
def map(self, f: Callable[[Any], Any]) -> MapXComArg:
return MapXComArg(self, [f])
def zip(self, *others: XComArg, fillvalue: Any = NOTSET) -> ZipXComArg:
return ZipXComArg([self, *others], fillvalue=fillvalue)
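# Illustrative sketch (not part of the original module): how ``map`` and ``zip``
# might be chained on task outputs inside a DAG file. ``extract`` and ``load``
# are hypothetical @task-decorated functions used only for this example.
#
#     nums = extract()                      # XComArg for extract's return value
#     doubled = nums.map(lambda x: x * 2)   # MapXComArg; the callable runs at resolve time
#     pairs = nums.zip(doubled)             # ZipXComArg of (original, doubled) tuples
#     load.expand(pair=pairs)               # dynamic task-mapping over the zipped values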
def get_task_map_length(self, run_id: str, *, session: Session) -> int | None:
"""Inspect length of pushed value for task-mapping.
This is used to determine how many task instances the scheduler should
create for a downstream using this XComArg for task-mapping.
*None* may be returned if the depended XCom has not been pushed.
"""
raise NotImplementedError()
@provide_session
def resolve(self, context: Context, session: Session = NEW_SESSION) -> Any:
"""Pull XCom value.
This should only be called during ``op.execute()`` with an appropriate
context (e.g. generated from ``TaskInstance.get_template_context()``).
Although the ``ResolveMixin`` parent mixin also has a ``resolve``
protocol, this adds the optional ``session`` argument that some of the
subclasses need.
:meta private:
"""
raise NotImplementedError()
def add_to_taskgroup(self, task_group: TaskGroup) -> None:
"""Add the task to the given task group.
:meta private:
"""
for op, _ in self.iter_references():
if op.node_id not in task_group.children:
task_group.add(op)
def __enter__(self):
if not self.operator.is_setup and not self.operator.is_teardown:
raise AirflowException("Only setup/teardown tasks can be used as context managers.")
SetupTeardownContext.push_setup_teardown_task(self.operator)
return SetupTeardownContext
def __exit__(self, exc_type, exc_val, exc_tb):
SetupTeardownContext.set_work_task_roots_and_leaves()
class PlainXComArg(XComArg):
"""Reference to one single XCom without any additional semantics.
This class should not be accessed directly, but only through XComArg. The
class inheritance chain and ``__new__`` is implemented in this slightly
convoluted way because we want to
a. Allow the user to continue using XComArg directly for the simple
semantics (see documentation of the base class for details).
b. Make ``isinstance(thing, XComArg)`` be able to detect all kinds of XCom
references.
c. Not allow many properties of PlainXComArg (including ``__getitem__`` and
``__str__``) to exist on other kinds of XComArg implementations since
they don't make sense.
:meta private:
"""
def __init__(self, operator: Operator, key: str = XCOM_RETURN_KEY):
self.operator = operator
self.key = key
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PlainXComArg):
return NotImplemented
return self.operator == other.operator and self.key == other.key
def __getitem__(self, item: str) -> XComArg:
"""Implements xcomresult['some_result_key']."""
if not isinstance(item, str):
raise ValueError(f"XComArg only supports str lookup, received {type(item).__name__}")
return PlainXComArg(operator=self.operator, key=item)
def __iter__(self):
"""Override iterable protocol to raise error explicitly.
The default ``__iter__`` implementation in Python calls ``__getitem__``
with 0, 1, 2, etc. until it hits an ``IndexError``. This does not work
well with our custom ``__getitem__`` implementation, and results in poor
DAG-writing experience since a misplaced ``*`` expansion would create an
infinite loop consuming the entire DAG parser.
This override catches the error eagerly, so an incorrectly implemented
DAG fails fast and avoids wasting resources on nonsensical iterating.
"""
raise TypeError("'XComArg' object is not iterable")
def __repr__(self) -> str:
if self.key == XCOM_RETURN_KEY:
return f"XComArg({self.operator!r})"
return f"XComArg({self.operator!r}, {self.key!r})"
def __str__(self) -> str:
"""
Backward compatibility for old-style jinja used in Airflow Operators.
**Example**: to use XComArg at BashOperator::
BashOperator(cmd=f"... { xcomarg } ...")
:return:
"""
xcom_pull_kwargs = [
f"task_ids='{self.operator.task_id}'",
f"dag_id='{self.operator.dag_id}'",
]
if self.key is not None:
xcom_pull_kwargs.append(f"key='{self.key}'")
xcom_pull_str = ", ".join(xcom_pull_kwargs)
# {{{{ is required to escape {{ in an f-string
xcom_pull = f"{{{{ task_instance.xcom_pull({xcom_pull_str}) }}}}"
return xcom_pull
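# Rendering sketch (illustrative names): for an operator with task_id="extract"
# in dag_id="etl", ``str(extract_task.output)`` produces the Jinja expression
#     {{ task_instance.xcom_pull(task_ids='extract', dag_id='etl', key='return_value') }}
# which is why a PlainXComArg can be embedded directly into templated strings.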
def _serialize(self) -> dict[str, Any]:
return {"task_id": self.operator.task_id, "key": self.key}
@classmethod
def _deserialize(cls, data: dict[str, Any], dag: DAG) -> XComArg:
return cls(dag.get_task(data["task_id"]), data["key"])
@property
def is_setup(self) -> bool:
return self.operator.is_setup
@is_setup.setter
def is_setup(self, val: bool):
self.operator.is_setup = val
@property
def is_teardown(self) -> bool:
return self.operator.is_teardown
@is_teardown.setter
def is_teardown(self, val: bool):
self.operator.is_teardown = val
@property
def on_failure_fail_dagrun(self) -> bool:
return self.operator.on_failure_fail_dagrun
@on_failure_fail_dagrun.setter
def on_failure_fail_dagrun(self, val: bool):
self.operator.on_failure_fail_dagrun = val
def as_setup(self) -> DependencyMixin:
for operator, _ in self.iter_references():
operator.is_setup = True
return self
def as_teardown(
self,
*,
setups: BaseOperator | Iterable[BaseOperator] | ArgNotSet = NOTSET,
on_failure_fail_dagrun=NOTSET,
):
for operator, _ in self.iter_references():
operator.is_teardown = True
if TYPE_CHECKING:
assert isinstance(operator, BaseOperator) # Can't set MappedOperator as teardown
operator.trigger_rule = TriggerRule.ALL_DONE_SETUP_SUCCESS
if on_failure_fail_dagrun is not NOTSET:
operator.on_failure_fail_dagrun = on_failure_fail_dagrun
if not isinstance(setups, ArgNotSet):
setups = [setups] if isinstance(setups, DependencyMixin) else setups
for s in setups:
s.is_setup = True
s >> operator
return self
def iter_references(self) -> Iterator[tuple[Operator, str]]:
yield self.operator, self.key
def map(self, f: Callable[[Any], Any]) -> MapXComArg:
if self.key != XCOM_RETURN_KEY:
raise ValueError("cannot map against non-return XCom")
return super().map(f)
def zip(self, *others: XComArg, fillvalue: Any = NOTSET) -> ZipXComArg:
if self.key != XCOM_RETURN_KEY:
raise ValueError("cannot map against non-return XCom")
return super().zip(*others, fillvalue=fillvalue)
def get_task_map_length(self, run_id: str, *, session: Session) -> int | None:
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskmap import TaskMap
from airflow.models.xcom import XCom
task = self.operator
if isinstance(task, MappedOperator):
unfinished_ti_count_query = session.query(func.count(TaskInstance.map_index)).filter(
TaskInstance.dag_id == task.dag_id,
TaskInstance.run_id == run_id,
TaskInstance.task_id == task.task_id,
# Special NULL treatment is needed because 'state' can be NULL.
# The "IN" part would produce "NULL NOT IN ..." and eventually
# "NULl = NULL", which is a big no-no in SQL.
or_(
TaskInstance.state.is_(None),
TaskInstance.state.in_(s.value for s in State.unfinished if s is not None),
),
)
if unfinished_ti_count_query.scalar():
return None # Not all of the expanded tis are done yet.
query = session.query(func.count(XCom.map_index)).filter(
XCom.dag_id == task.dag_id,
XCom.run_id == run_id,
XCom.task_id == task.task_id,
XCom.map_index >= 0,
XCom.key == XCOM_RETURN_KEY,
)
else:
query = session.query(TaskMap.length).filter(
TaskMap.dag_id == task.dag_id,
TaskMap.run_id == run_id,
TaskMap.task_id == task.task_id,
TaskMap.map_index < 0,
)
return query.scalar()
@provide_session
def resolve(self, context: Context, session: Session = NEW_SESSION) -> Any:
from airflow.models.taskinstance import TaskInstance
ti = context["ti"]
assert isinstance(ti, TaskInstance), "Wait for AIP-44 implementation to complete"
task_id = self.operator.task_id
map_indexes = ti.get_relevant_upstream_map_indexes(
self.operator,
context["expanded_ti_count"],
session=session,
)
result = ti.xcom_pull(
task_ids=task_id,
map_indexes=map_indexes,
key=self.key,
default=NOTSET,
session=session,
)
if not isinstance(result, ArgNotSet):
return result
if self.key == XCOM_RETURN_KEY:
return None
if getattr(self.operator, "multiple_outputs", False):
# If the operator is set to have multiple outputs and it was not executed,
# we should return "None" instead of showing an error. This is because when
# multiple outputs XComs are created, the XCom keys associated with them will have
# different names than the predefined "XCOM_RETURN_KEY" and won't be found.
# Therefore, it's better to return "None" like we did above where self.key==XCOM_RETURN_KEY.
return None
raise XComNotFound(ti.dag_id, task_id, self.key)
def _get_callable_name(f: Callable | str) -> str:
"""Try to "describe" a callable by getting its name."""
if callable(f):
return f.__name__
# Parse the source to find whatever is behind "def". For safety, we don't
# want to evaluate the code in any meaningful way!
with contextlib.suppress(Exception):
kw, name, _ = f.lstrip().split(None, 2)
if kw == "def":
return name
return "<function>"
class _MapResult(Sequence):
def __init__(self, value: Sequence | dict, callables: MapCallables) -> None:
self.value = value
self.callables = callables
def __getitem__(self, index: Any) -> Any:
value = self.value[index]
# In the worker, we can access all actual callables. Call them.
callables = [f for f in self.callables if callable(f)]
if len(callables) == len(self.callables):
for f in callables:
value = f(value)
return value
# In the scheduler, we don't have access to the actual callables, nor do
# we want to run it since it's arbitrary code. This builds a string to
# represent the call chain in the UI or logs instead.
for v in self.callables:
value = f"{_get_callable_name(v)}({value})"
return value
def __len__(self) -> int:
return len(self.value)
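# Behaviour sketch (illustrative): on a worker, where the real callables are
# available, values are transformed lazily on access; on the scheduler, where
# callables arrive as source strings, no user code is executed.
#     result = _MapResult([1, 2, 3], [lambda x: x + 1])
#     result[0]    # -> 2 (callable applied)
#     len(result)  # -> 3
# With source-string callables, __getitem__ instead builds a descriptive
# call-chain string via _get_callable_name rather than running anything.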
class MapXComArg(XComArg):
"""An XCom reference with ``map()`` call(s) applied.
This is based on an XComArg, but also applies a series of "transforms" that
convert the pulled XCom value.
:meta private:
"""
def __init__(self, arg: XComArg, callables: MapCallables) -> None:
for c in callables:
if getattr(c, "_airflow_is_task_decorator", False):
raise ValueError("map() argument must be a plain function, not a @task operator")
self.arg = arg
self.callables = callables
def __repr__(self) -> str:
map_calls = "".join(f".map({_get_callable_name(f)})" for f in self.callables)
return f"{self.arg!r}{map_calls}"
def _serialize(self) -> dict[str, Any]:
return {
"arg": serialize_xcom_arg(self.arg),
"callables": [inspect.getsource(c) if callable(c) else c for c in self.callables],
}
@classmethod
def _deserialize(cls, data: dict[str, Any], dag: DAG) -> XComArg:
# We are deliberately NOT deserializing the callables. These are shown
# in the UI, and displaying a function object is useless.
return cls(deserialize_xcom_arg(data["arg"], dag), data["callables"])
def iter_references(self) -> Iterator[tuple[Operator, str]]:
yield from self.arg.iter_references()
def map(self, f: Callable[[Any], Any]) -> MapXComArg:
# Flatten arg.map(f1).map(f2) into one MapXComArg.
return MapXComArg(self.arg, [*self.callables, f])
def get_task_map_length(self, run_id: str, *, session: Session) -> int | None:
return self.arg.get_task_map_length(run_id, session=session)
@provide_session
def resolve(self, context: Context, session: Session = NEW_SESSION) -> Any:
value = self.arg.resolve(context, session=session)
if not isinstance(value, (Sequence, dict)):
raise ValueError(f"XCom map expects sequence or dict, not {type(value).__name__}")
return _MapResult(value, self.callables)
class _ZipResult(Sequence):
def __init__(self, values: Sequence[Sequence | dict], *, fillvalue: Any = NOTSET) -> None:
self.values = values
self.fillvalue = fillvalue
@staticmethod
def _get_or_fill(container: Sequence | dict, index: Any, fillvalue: Any) -> Any:
try:
return container[index]
except (IndexError, KeyError):
return fillvalue
def __getitem__(self, index: Any) -> Any:
if index >= len(self):
raise IndexError(index)
return tuple(self._get_or_fill(value, index, self.fillvalue) for value in self.values)
def __len__(self) -> int:
lengths = (len(v) for v in self.values)
if isinstance(self.fillvalue, ArgNotSet):
return min(lengths)
return max(lengths)
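# Behaviour sketch (illustrative values) of the zip semantics above:
#     _ZipResult([[1, 2, 3], ["a", "b"]])                  # len() == 2, like zip()
#     _ZipResult([[1, 2, 3], ["a", "b"]], fillvalue=None)  # len() == 3, like zip_longest()
#     _ZipResult([[1, 2], ["a", "b"]])[0]                  # -> (1, "a")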
class ZipXComArg(XComArg):
"""An XCom reference with ``zip()`` applied.
This is constructed from multiple XComArg instances, and presents an
iterable that "zips" them together like the built-in ``zip()`` (and
``itertools.zip_longest()`` if ``fillvalue`` is provided).
"""
def __init__(self, args: Sequence[XComArg], *, fillvalue: Any = NOTSET) -> None:
if not args:
raise ValueError("At least one input is required")
self.args = args
self.fillvalue = fillvalue
def __repr__(self) -> str:
args_iter = iter(self.args)
first = repr(next(args_iter))
rest = ", ".join(repr(arg) for arg in args_iter)
if isinstance(self.fillvalue, ArgNotSet):
return f"{first}.zip({rest})"
return f"{first}.zip({rest}, fillvalue={self.fillvalue!r})"
def _serialize(self) -> dict[str, Any]:
args = [serialize_xcom_arg(arg) for arg in self.args]
if isinstance(self.fillvalue, ArgNotSet):
return {"args": args}
return {"args": args, "fillvalue": self.fillvalue}
@classmethod
def _deserialize(cls, data: dict[str, Any], dag: DAG) -> XComArg:
return cls(
[deserialize_xcom_arg(arg, dag) for arg in data["args"]],
fillvalue=data.get("fillvalue", NOTSET),
)
def iter_references(self) -> Iterator[tuple[Operator, str]]:
for arg in self.args:
yield from arg.iter_references()
def get_task_map_length(self, run_id: str, *, session: Session) -> int | None:
all_lengths = (arg.get_task_map_length(run_id, session=session) for arg in self.args)
ready_lengths = [length for length in all_lengths if length is not None]
if len(ready_lengths) != len(self.args):
return None # If any of the referenced XComs is not ready, we are not ready either.
if isinstance(self.fillvalue, ArgNotSet):
return min(ready_lengths)
return max(ready_lengths)
@provide_session
def resolve(self, context: Context, session: Session = NEW_SESSION) -> Any:
values = [arg.resolve(context, session=session) for arg in self.args]
for value in values:
if not isinstance(value, (Sequence, dict)):
raise ValueError(f"XCom zip expects sequence or dict, not {type(value).__name__}")
return _ZipResult(values, fillvalue=self.fillvalue)
_XCOM_ARG_TYPES: Mapping[str, type[XComArg]] = {
"": PlainXComArg,
"map": MapXComArg,
"zip": ZipXComArg,
}
def serialize_xcom_arg(value: XComArg) -> dict[str, Any]:
"""DAG serialization interface."""
key = next(k for k, v in _XCOM_ARG_TYPES.items() if v == type(value))
if key:
return {"type": key, **value._serialize()}
return value._serialize()
def deserialize_xcom_arg(data: dict[str, Any], dag: DAG) -> XComArg:
"""DAG serialization interface."""
klass = _XCOM_ARG_TYPES[data.get("type", "")]
return klass._deserialize(data, dag)
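# Round-trip sketch (illustrative): a MapXComArg serializes to a dict such as
#     {"type": "map",
#      "arg": {"task_id": "extract", "key": "return_value"},
#      "callables": ["<source of the mapped function>"]}
# and deserialize_xcom_arg() dispatches on the "type" key ("" -> PlainXComArg,
# "map" -> MapXComArg, "zip" -> ZipXComArg) to rebuild the reference against a
# deserialized DAG.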
| 24,864 | 38.281201 | 104 |
py
|
airflow
|
airflow-main/airflow/models/dagpickle.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
import dill
from sqlalchemy import BigInteger, Column, Integer, PickleType
from airflow.models.base import Base
from airflow.utils import timezone
from airflow.utils.sqlalchemy import UtcDateTime
if TYPE_CHECKING:
from airflow.models.dag import DAG
class DagPickle(Base):
"""
Dags can originate from different places (user repos, main repo, ...)
and also get executed in different places (different executors). This
object represents a version of a DAG and becomes a source of truth for
a BackfillJob execution. A pickle is a native python serialized object,
and in this case gets stored in the database for the duration of the job.
The executors pick up the DagPickle id and read the dag definition from
the database.
"""
id = Column(Integer, primary_key=True)
pickle = Column(PickleType(pickler=dill))
created_dttm = Column(UtcDateTime, default=timezone.utcnow)
pickle_hash = Column(BigInteger)
__tablename__ = "dag_pickle"
def __init__(self, dag: DAG) -> None:
self.dag_id = dag.dag_id
if hasattr(dag, "template_env"):
dag.template_env = None # type: ignore[attr-defined]
self.pickle_hash = hash(dag)
self.pickle = dag
| 2,096 | 35.155172 | 77 |
py
|
airflow
|
airflow-main/airflow/models/dagbag.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import hashlib
import importlib
import importlib.machinery
import importlib.util
import os
import sys
import textwrap
import traceback
import warnings
import zipfile
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, NamedTuple
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session
from tabulate import tabulate
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowClusterPolicyError,
AirflowClusterPolicySkipDag,
AirflowClusterPolicyViolation,
AirflowDagCycleException,
AirflowDagDuplicatedIdException,
RemovedInAirflow3Warning,
)
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.dag_cycle_tester import check_cycle
from airflow.utils.docs import get_docs_url
from airflow.utils.file import correct_maybe_zipped, list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.retries import MAX_DB_RETRIES, run_with_db_retries
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.timeout import timeout
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
import pathlib
from airflow.models.dag import DAG
class FileLoadStat(NamedTuple):
"""Information about single file."""
file: str
duration: timedelta
dag_num: int
task_num: int
dags: str
class DagBag(LoggingMixin):
"""
A dagbag is a collection of dags, parsed out of a folder tree, with high-level
configuration settings such as which database to use as a backend and
which executor to use to fire off tasks. This makes it easier to run
distinct environments for, say, production and development, tests, or for
different teams or security profiles. What would have been system-level
settings are now dagbag-level, so that one system can run multiple,
independent sets of settings.
:param dag_folder: the folder to scan to find DAGs
:param include_examples: whether to include the examples that ship
with airflow or not
:param read_dags_from_db: Read DAGs from DB if ``True`` is passed.
If ``False`` DAGs are read from python files.
:param load_op_links: Should the extra operator link be loaded via plugins when
de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
are not loaded to not run User code in Scheduler.
"""
def __init__(
self,
dag_folder: str | pathlib.Path | None = None,
include_examples: bool | ArgNotSet = NOTSET,
safe_mode: bool | ArgNotSet = NOTSET,
read_dags_from_db: bool = False,
store_serialized_dags: bool | None = None,
load_op_links: bool = True,
collect_dags: bool = True,
):
# Avoid circular import
super().__init__()
include_examples = (
include_examples
if isinstance(include_examples, bool)
else conf.getboolean("core", "LOAD_EXAMPLES")
)
safe_mode = (
safe_mode if isinstance(safe_mode, bool) else conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE")
)
if store_serialized_dags:
warnings.warn(
"The store_serialized_dags parameter has been deprecated. "
"You should pass the read_dags_from_db parameter.",
RemovedInAirflow3Warning,
stacklevel=2,
)
read_dags_from_db = store_serialized_dags
dag_folder = dag_folder or settings.DAGS_FOLDER
self.dag_folder = dag_folder
self.dags: dict[str, DAG] = {}
# the file's last modified timestamp when we last read it
self.file_last_changed: dict[str, datetime] = {}
self.import_errors: dict[str, str] = {}
self.has_logged = False
self.read_dags_from_db = read_dags_from_db
# Only used by read_dags_from_db=True
self.dags_last_fetched: dict[str, datetime] = {}
# Only used by SchedulerJob to compare the dag_hash to identify change in DAGs
self.dags_hash: dict[str, str] = {}
self.dagbag_import_error_tracebacks = conf.getboolean("core", "dagbag_import_error_tracebacks")
self.dagbag_import_error_traceback_depth = conf.getint("core", "dagbag_import_error_traceback_depth")
if collect_dags:
self.collect_dags(
dag_folder=dag_folder,
include_examples=include_examples,
safe_mode=safe_mode,
)
# Should the extra operator link be loaded via plugins?
# This flag is set to False in Scheduler so that Extra Operator links are not loaded
self.load_op_links = load_op_links
def size(self) -> int:
""":return: the amount of dags contained in this dagbag"""
return len(self.dags)
@property
def store_serialized_dags(self) -> bool:
"""Whether to read dags from DB."""
warnings.warn(
"The store_serialized_dags property has been deprecated. Use read_dags_from_db instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.read_dags_from_db
@property
def dag_ids(self) -> list[str]:
"""
Get DAG ids.
:return: a list of DAG IDs in this bag
"""
return list(self.dags.keys())
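# Usage sketch (hypothetical paths and IDs): a DagBag is typically built either
# from a DAG folder or from the serialized representation in the database.
#     bag = DagBag(dag_folder="/opt/airflow/dags", include_examples=False)
#     print(bag.size(), bag.import_errors)
#     dag = bag.get_dag("my_dag_id")
#     db_bag = DagBag(read_dags_from_db=True)  # e.g. in the webserver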
@provide_session
def get_dag(self, dag_id, session: Session = None):
"""
Gets the DAG out of the dictionary, and refreshes it if expired.
:param dag_id: DAG ID
"""
# Avoid circular import
from airflow.models.dag import DagModel
if self.read_dags_from_db:
# Import here so that serialized dag is only imported when serialization is enabled
from airflow.models.serialized_dag import SerializedDagModel
if dag_id not in self.dags:
# Load from DB if not (yet) in the bag
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If DAG is in the DagBag, check the following
# 1. if time has come to check if DAG is updated (controlled by min_serialized_dag_fetch_secs)
# 2. check the last_updated and hash columns in SerializedDag table to see if
# Serialized DAG is updated
# 3. if (2) is yes, fetch the Serialized DAG.
# 4. if (2) returns None (i.e. Serialized DAG is deleted), remove dag from dagbag
# if it exists and return None.
min_serialized_dag_fetch_secs = timedelta(seconds=settings.MIN_SERIALIZED_DAG_FETCH_INTERVAL)
if (
dag_id in self.dags_last_fetched
and timezone.utcnow() > self.dags_last_fetched[dag_id] + min_serialized_dag_fetch_secs
):
sd_latest_version_and_updated_datetime = (
SerializedDagModel.get_latest_version_hash_and_updated_datetime(
dag_id=dag_id, session=session
)
)
if not sd_latest_version_and_updated_datetime:
self.log.warning("Serialized DAG %s no longer exists", dag_id)
del self.dags[dag_id]
del self.dags_last_fetched[dag_id]
del self.dags_hash[dag_id]
return None
sd_latest_version, sd_last_updated_datetime = sd_latest_version_and_updated_datetime
if (
sd_last_updated_datetime > self.dags_last_fetched[dag_id]
or sd_latest_version != self.dags_hash[dag_id]
):
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If asking for a known subdag, we want to refresh the parent
dag = None
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.parent_dag:
root_dag_id = dag.parent_dag.dag_id
# If DAG Model is absent, we can't check last_expired property. Is the DAG not yet synchronized?
orm_dag = DagModel.get_current(root_dag_id, session=session)
if not orm_dag:
return self.dags.get(dag_id)
# If the dag corresponding to root_dag_id is absent or expired
is_missing = root_dag_id not in self.dags
is_expired = orm_dag.last_expired and dag and dag.last_loaded < orm_dag.last_expired
if is_expired:
# Remove associated dags so we can re-add them.
self.dags = {
key: dag
for key, dag in self.dags.items()
if root_dag_id != key and not (dag.parent_dag and root_dag_id == dag.parent_dag.dag_id)
}
if is_missing or is_expired:
# Reprocess source file.
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False
)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
def _add_dag_from_db(self, dag_id: str, session: Session):
"""Add DAG to DagBag from DB."""
from airflow.models.serialized_dag import SerializedDagModel
row = SerializedDagModel.get(dag_id, session)
if not row:
return None
row.load_op_links = self.load_op_links
dag = row.dag
for subdag in dag.subdags:
self.dags[subdag.dag_id] = subdag
self.dags[dag.dag_id] = dag
self.dags_last_fetched[dag.dag_id] = timezone.utcnow()
self.dags_hash[dag.dag_id] = row.dag_hash
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
Given a path to a python module or zip file, this method imports
the module and looks for dag objects within it.
"""
from airflow.models.dag import DagContext
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return []
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if (
only_if_updated
and filepath in self.file_last_changed
and file_last_changed_on_disk == self.file_last_changed[filepath]
):
return []
except Exception as e:
self.log.exception(e)
return []
# Ensure we don't pick up anything else we didn't mean to
DagContext.autoregistered_dags.clear()
if filepath.endswith(".py") or not zipfile.is_zipfile(filepath):
mods = self._load_modules_from_file(filepath, safe_mode)
else:
mods = self._load_modules_from_zip(filepath, safe_mode)
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
def _load_modules_from_file(self, filepath, safe_mode):
from airflow.models.dag import DagContext
if not might_contain_dag(filepath, safe_mode):
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info("File %s assumed to contain no DAGs. Skipping.", filepath)
return []
self.log.debug("Importing %s", filepath)
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
path_hash = hashlib.sha1(filepath.encode("utf-8")).hexdigest()
mod_name = f"unusual_prefix_{path_hash}_{org_mod_name}"
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
def parse(mod_name, filepath):
try:
loader = importlib.machinery.SourceFileLoader(mod_name, filepath)
spec = importlib.util.spec_from_loader(mod_name, loader)
new_module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = new_module
loader.exec_module(new_module)
return [new_module]
except Exception as e:
DagContext.autoregistered_dags.clear()
self.log.exception("Failed to import: %s", filepath)
if self.dagbag_import_error_tracebacks:
self.import_errors[filepath] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[filepath] = str(e)
return []
dagbag_import_timeout = settings.get_dagbag_import_timeout(filepath)
if not isinstance(dagbag_import_timeout, (int, float)):
raise TypeError(
f"Value ({dagbag_import_timeout}) from get_dagbag_import_timeout must be int or float"
)
if dagbag_import_timeout <= 0: # no parsing timeout
return parse(mod_name, filepath)
timeout_msg = (
f"DagBag import timeout for {filepath} after {dagbag_import_timeout}s.\n"
"Please take a look at these docs to improve your DAG import time:\n"
f"* {get_docs_url('best-practices.html#top-level-python-code')}\n"
f"* {get_docs_url('best-practices.html#reducing-dag-complexity')}"
)
with timeout(dagbag_import_timeout, error_message=timeout_msg):
return parse(mod_name, filepath)
def _load_modules_from_zip(self, filepath, safe_mode):
from airflow.models.dag import DagContext
mods = []
with zipfile.ZipFile(filepath) as current_zip_file:
for zip_info in current_zip_file.infolist():
head, _ = os.path.split(zip_info.filename)
mod_name, ext = os.path.splitext(zip_info.filename)
if ext not in [".py", ".pyc"]:
continue
if head:
continue
if mod_name == "__init__":
self.log.warning("Found __init__.%s at root of %s", ext, filepath)
self.log.debug("Reading %s from %s", zip_info.filename, filepath)
if not might_contain_dag(zip_info.filename, safe_mode, current_zip_file):
# todo: create ignore list
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s:%s assumed to contain no DAGs. Skipping.", filepath, zip_info.filename
)
continue
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
try:
sys.path.insert(0, filepath)
current_module = importlib.import_module(mod_name)
mods.append(current_module)
except Exception as e:
DagContext.autoregistered_dags.clear()
fileloc = os.path.join(filepath, zip_info.filename)
self.log.exception("Failed to import: %s", fileloc)
if self.dagbag_import_error_tracebacks:
self.import_errors[fileloc] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[fileloc] = str(e)
finally:
if sys.path[0] == filepath:
del sys.path[0]
return mods
def _process_modules(self, filepath, mods, file_last_changed_on_disk):
from airflow.models.dag import DAG, DagContext # Avoid circular import
top_level_dags = {(o, m) for m in mods for o in m.__dict__.values() if isinstance(o, DAG)}
top_level_dags.update(DagContext.autoregistered_dags)
DagContext.current_autoregister_module_name = None
DagContext.autoregistered_dags.clear()
found_dags = []
for (dag, mod) in top_level_dags:
dag.fileloc = mod.__file__
try:
dag.validate()
self.bag_dag(dag=dag, root_dag=dag)
except AirflowClusterPolicySkipDag:
pass
except Exception as e:
self.log.exception("Failed to bag_dag: %s", dag.fileloc)
self.import_errors[dag.fileloc] = f"{type(e).__name__}: {e}"
self.file_last_changed[dag.fileloc] = file_last_changed_on_disk
else:
found_dags.append(dag)
found_dags += dag.subdags
return found_dags
def bag_dag(self, dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
:raises: AirflowDagCycleException if a cycle is detected in this dag or its subdags.
:raises: AirflowDagDuplicatedIdException if this dag or its subdags already exists in the bag.
"""
self._bag_dag(dag=dag, root_dag=root_dag, recursive=True)
def _bag_dag(self, *, dag, root_dag, recursive):
"""Actual implementation of bagging a dag.
The only purpose of this is to avoid exposing ``recursive`` in ``bag_dag()``;
the parameter is intended to be used only within this implementation.
"""
check_cycle(dag) # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
try:
# Check policies
settings.dag_policy(dag)
for task in dag.tasks:
settings.task_policy(task)
except (AirflowClusterPolicyViolation, AirflowClusterPolicySkipDag):
raise
except Exception as e:
self.log.exception(e)
raise AirflowClusterPolicyError(e)
subdags = dag.subdags
try:
# DAG.subdags automatically performs DFS search, so we don't recurse
# into further _bag_dag() calls.
if recursive:
for subdag in subdags:
subdag.fileloc = dag.fileloc
subdag.parent_dag = dag
self._bag_dag(dag=subdag, root_dag=root_dag, recursive=False)
prev_dag = self.dags.get(dag.dag_id)
if prev_dag and prev_dag.fileloc != dag.fileloc:
raise AirflowDagDuplicatedIdException(
dag_id=dag.dag_id,
incoming=dag.fileloc,
existing=self.dags[dag.dag_id].fileloc,
)
self.dags[dag.dag_id] = dag
self.log.debug("Loaded DAG %s", dag)
except (AirflowDagCycleException, AirflowDagDuplicatedIdException):
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception("Exception bagging dag: %s", dag.dag_id)
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if recursive:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise
def collect_dags(
self,
dag_folder: str | pathlib.Path | None = None,
only_if_updated: bool = True,
include_examples: bool = conf.getboolean("core", "LOAD_EXAMPLES"),
safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE"),
):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the patterns specified
in the file.
**Note**: The patterns in ``.airflowignore`` are interpreted as either
un-anchored regexes or gitignore-like glob expressions, depending on
the ``DAG_IGNORE_FILE_SYNTAX`` configuration parameter.
"""
if self.read_dags_from_db:
return
self.log.info("Filling up the DagBag from %s", dag_folder)
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
# Ensure dag_folder is a str -- it may have been a pathlib.Path
dag_folder = correct_maybe_zipped(str(dag_folder))
for filepath in list_py_file_paths(
dag_folder,
safe_mode=safe_mode,
include_examples=include_examples,
):
try:
file_parse_start_dttm = timezone.utcnow()
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
file_parse_end_dttm = timezone.utcnow()
stats.append(
FileLoadStat(
file=filepath.replace(settings.DAGS_FOLDER, ""),
duration=file_parse_end_dttm - file_parse_start_dttm,
dag_num=len(found_dags),
task_num=sum(len(dag.tasks) for dag in found_dags),
dags=str([dag.dag_id for dag in found_dags]),
)
)
except Exception as e:
self.log.exception(e)
self.dagbag_stats = sorted(stats, key=lambda x: x.duration, reverse=True)
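# Illustrative note (assumes the default ``regexp`` setting for
# DAG_IGNORE_FILE_SYNTAX): a ``.airflowignore`` placed in the DAG folder could
# contain un-anchored regex patterns, one per line, e.g.
#     helpers/
#     .*_backup\.py
# and any file path matching one of them is skipped by list_py_file_paths().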
def collect_dags_from_db(self):
"""Collects DAGs from database."""
from airflow.models.serialized_dag import SerializedDagModel
with Stats.timer("collect_db_dags"):
self.log.info("Filling up the DagBag from database")
# The dagbag contains all rows in serialized_dag table. Deleted DAGs are deleted
# from the table by the scheduler job.
self.dags = SerializedDagModel.read_all_dags()
# Adds subdags.
# DAG post-processing steps such as self.bag_dag and croniter are not needed as
# they are done by scheduler before serialization.
subdags = {}
for dag in self.dags.values():
for subdag in dag.subdags:
subdags[subdag.dag_id] = subdag
self.dags.update(subdags)
def dagbag_report(self):
"""Prints a report around DagBag loading stats."""
stats = self.dagbag_stats
dag_folder = self.dag_folder
duration = sum((o.duration for o in stats), timedelta()).total_seconds()
dag_num = sum(o.dag_num for o in stats)
task_num = sum(o.task_num for o in stats)
table = tabulate(stats, headers="keys")
report = textwrap.dedent(
f"""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}\n{table}
"""
)
return report
@classmethod
@provide_session
def _sync_to_db(
cls,
dags: dict[str, DAG],
processor_subdir: str | None = None,
session: Session = NEW_SESSION,
):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
log = cls.logger()
def _serialize_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
if dag.is_subdag:
return []
try:
# We can't use bulk_write_to_db as we want to capture each error individually
dag_was_updated = SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
if dag_was_updated:
DagBag._sync_perm_for_dag(dag, session=session)
return []
except OperationalError:
raise
except Exception:
log.exception("Failed to write serialized DAG: %s", dag.fileloc)
dagbag_import_error_traceback_depth = conf.getint(
"core", "dagbag_import_error_traceback_depth"
)
return [(dag.fileloc, traceback.format_exc(limit=-dagbag_import_error_traceback_depth))]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
import_errors = {}
for attempt in run_with_db_retries(logger=log):
with attempt:
serialize_errors = []
log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
log.debug("Calling the DAG.bulk_sync_to_db method")
try:
# Write Serialized DAGs to DB, capturing errors
for dag in dags.values():
serialize_errors.extend(_serialize_dag_capturing_errors(dag, session))
DAG.bulk_write_to_db(dags.values(), processor_subdir=processor_subdir, session=session)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
import_errors.update(dict(serialize_errors))
return import_errors
@provide_session
def sync_to_db(self, processor_subdir: str | None = None, session: Session = NEW_SESSION):
import_errors = DagBag._sync_to_db(dags=self.dags, processor_subdir=processor_subdir, session=session)
self.import_errors.update(import_errors)
@classmethod
@provide_session
def _sync_perm_for_dag(cls, dag: DAG, session: Session = NEW_SESSION):
"""Sync DAG specific permissions."""
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
cls.logger().debug("Syncing DAG permissions: %s to the DB", root_dag_id)
from airflow.www.security import ApplessAirflowSecurityManager
security_manager = ApplessAirflowSecurityManager(session=session)
security_manager.sync_perm_for_dag(root_dag_id, dag.access_control)
| 28,689 | 39.927247 | 110 |
py
|
airflow
|
airflow-main/airflow/models/taskmap.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Table to store information about mapped task instances (AIP-42)."""
from __future__ import annotations
import collections.abc
import enum
from typing import TYPE_CHECKING, Any, Collection
from sqlalchemy import CheckConstraint, Column, ForeignKeyConstraint, Integer, String
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils.sqlalchemy import ExtendedJSON
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstance
class TaskMapVariant(enum.Enum):
"""Task map variant.
Possible values are **dict** (for a key-value mapping) and **list** (for an
ordered value sequence).
"""
DICT = "dict"
LIST = "list"
class TaskMap(Base):
"""Model to track dynamic task-mapping information.
This is currently only populated by an upstream TaskInstance pushing an
XCom that's pulled by a downstream for mapping purposes.
"""
__tablename__ = "task_map"
# Link to upstream TaskInstance creating this dynamic mapping information.
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
run_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
map_index = Column(Integer, primary_key=True)
length = Column(Integer, nullable=False)
keys = Column(ExtendedJSON, nullable=True)
__table_args__ = (
CheckConstraint(length >= 0, name="task_map_length_not_negative"),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_map_task_instance_fkey",
ondelete="CASCADE",
onupdate="CASCADE",
),
)
def __init__(
self,
dag_id: str,
task_id: str,
run_id: str,
map_index: int,
length: int,
keys: list[Any] | None,
) -> None:
self.dag_id = dag_id
self.task_id = task_id
self.run_id = run_id
self.map_index = map_index
self.length = length
self.keys = keys
@classmethod
def from_task_instance_xcom(cls, ti: TaskInstance, value: Collection) -> TaskMap:
if ti.run_id is None:
raise ValueError("cannot record task map for unrun task instance")
return cls(
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=ti.run_id,
map_index=ti.map_index,
length=len(value),
keys=(list(value) if isinstance(value, collections.abc.Mapping) else None),
)
@property
def variant(self) -> TaskMapVariant:
if self.keys is None:
return TaskMapVariant.LIST
return TaskMapVariant.DICT
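# Behaviour sketch (``ti`` is a hypothetical upstream TaskInstance): a
# dict-valued XCom produces a DICT-variant TaskMap, while a list produces a
# LIST variant.
#     tm = TaskMap.from_task_instance_xcom(ti, {"a": 1, "b": 2})
#     tm.length   # -> 2
#     tm.keys     # -> ["a", "b"]
#     tm.variant  # -> TaskMapVariant.DICT
#     TaskMap.from_task_instance_xcom(ti, [10, 20, 30]).variant  # -> TaskMapVariant.LIST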
| 3,684 | 31.610619 | 87 |
py
|
airflow
|
airflow-main/airflow/models/slamiss.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import Boolean, Column, Index, String, Text
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils.sqlalchemy import UtcDateTime
class SlaMiss(Base):
"""
Model that stores a history of the SLA that have been missed.
It is used to keep track of SLA failures over time and to avoid double
triggering alert emails.
"""
__tablename__ = "sla_miss"
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
email_sent = Column(Boolean, default=False)
timestamp = Column(UtcDateTime)
description = Column(Text)
notification_sent = Column(Boolean, default=False)
__table_args__ = (Index("sm_dag", dag_id, unique=False),)
def __repr__(self):
return str((self.dag_id, self.task_id, self.execution_date.isoformat()))
| 1,779 | 36.87234 | 80 |
py
|
airflow
|
airflow-main/airflow/models/errors.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import Column, Integer, String, Text
from airflow.models.base import Base
from airflow.utils.sqlalchemy import UtcDateTime
class ImportError(Base):
"""
A table to store all Import Errors. The ImportErrors are recorded when parsing DAGs.
These errors are displayed on the Webserver.
"""
__tablename__ = "import_error"
id = Column(Integer, primary_key=True)
timestamp = Column(UtcDateTime)
filename = Column(String(1024))
stacktrace = Column(Text)
| 1,324 | 34.810811 | 88 |
py
|
airflow
|
airflow-main/airflow/models/skipmixin.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.serialization.pydantic.dag_run import DagRunPydantic
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import tuple_in_condition
from airflow.utils.state import TaskInstanceState
if TYPE_CHECKING:
from pendulum import DateTime
from sqlalchemy import Session
from airflow.models.operator import Operator
from airflow.models.taskmixin import DAGNode
from airflow.serialization.pydantic.taskinstance import TaskInstancePydantic
# The key used by SkipMixin to store XCom data.
XCOM_SKIPMIXIN_KEY = "skipmixin_key"
# The dictionary key used to denote task IDs that are skipped
XCOM_SKIPMIXIN_SKIPPED = "skipped"
# The dictionary key used to denote task IDs that are followed
XCOM_SKIPMIXIN_FOLLOWED = "followed"
def _ensure_tasks(nodes: Iterable[DAGNode]) -> Sequence[Operator]:
from airflow.models.baseoperator import BaseOperator
from airflow.models.mappedoperator import MappedOperator
return [n for n in nodes if isinstance(n, (BaseOperator, MappedOperator))]
class SkipMixin(LoggingMixin):
"""A Mixin to skip Tasks Instances."""
def _set_state_to_skipped(
self,
dag_run: DagRun | DagRunPydantic,
tasks: Sequence[str] | Sequence[tuple[str, int]],
session: Session,
) -> None:
"""Used internally to set state of task instances to skipped from the same dag run."""
if tasks:
now = timezone.utcnow()
TI = TaskInstance
query = session.query(TI).filter(
TI.dag_id == dag_run.dag_id,
TI.run_id == dag_run.run_id,
)
if isinstance(tasks[0], tuple):
query = query.filter(tuple_in_condition((TI.task_id, TI.map_index), tasks))
else:
query = query.filter(TI.task_id.in_(tasks))
query.update(
{
TaskInstance.state: TaskInstanceState.SKIPPED,
TaskInstance.start_date: now,
TaskInstance.end_date: now,
},
synchronize_session=False,
)
@provide_session
def skip(
self,
dag_run: DagRun | DagRunPydantic,
execution_date: DateTime,
tasks: Iterable[DAGNode],
session: Session = NEW_SESSION,
map_index: int = -1,
):
"""
Sets task instances to skipped from the same dag run.
If this instance has a `task_id` attribute, store the list of skipped task IDs to XCom
so that NotPreviouslySkippedDep knows these tasks should be skipped when they
are cleared.
:param dag_run: the DagRun for which to set the tasks to skipped
:param execution_date: execution_date
:param tasks: tasks to skip (not task_ids)
:param session: db session to use
:param map_index: map_index of the current task instance
"""
task_list = _ensure_tasks(tasks)
if not task_list:
return
if execution_date and not dag_run:
from airflow.models.dagrun import DagRun
warnings.warn(
"Passing an execution_date to `skip()` is deprecated in favour of passing a dag_run",
RemovedInAirflow3Warning,
stacklevel=2,
)
dag_run = (
session.query(DagRun)
.filter(
DagRun.dag_id == task_list[0].dag_id,
DagRun.execution_date == execution_date,
)
.one()
)
elif execution_date and dag_run and execution_date != dag_run.execution_date:
raise ValueError(
"execution_date has a different value to dag_run.execution_date -- please only pass dag_run"
)
if dag_run is None:
raise ValueError("dag_run is required")
task_ids_list = [d.task_id for d in task_list]
self._set_state_to_skipped(dag_run, task_ids_list, session)
session.commit()
# SkipMixin may not necessarily have a task_id attribute. Only store to XCom if one is available.
task_id: str | None = getattr(self, "task_id", None)
if task_id is not None:
from airflow.models.xcom import XCom
XCom.set(
key=XCOM_SKIPMIXIN_KEY,
value={XCOM_SKIPMIXIN_SKIPPED: task_ids_list},
task_id=task_id,
dag_id=dag_run.dag_id,
run_id=dag_run.run_id,
map_index=map_index,
session=session,
)
def skip_all_except(
self,
ti: TaskInstance | TaskInstancePydantic,
branch_task_ids: None | str | Iterable[str],
):
"""
This method implements the logic for a branching operator; given a single
task ID or list of task IDs to follow, this skips all other tasks
immediately downstream of this operator.
branch_task_ids is stored to XCom so that NotPreviouslySkippedDep knows skipped tasks or
newly added tasks should be skipped when they are cleared.
"""
self.log.info("Following branch %s", branch_task_ids)
if isinstance(branch_task_ids, str):
branch_task_id_set = {branch_task_ids}
elif isinstance(branch_task_ids, Iterable):
branch_task_id_set = set(branch_task_ids)
invalid_task_ids_type = {
(bti, type(bti).__name__) for bti in branch_task_ids if not isinstance(bti, str)
}
if invalid_task_ids_type:
raise AirflowException(
f"'branch_task_ids' expected all task IDs are strings. "
f"Invalid tasks found: {invalid_task_ids_type}."
)
elif branch_task_ids is None:
branch_task_id_set = set()
else:
raise AirflowException(
"'branch_task_ids' must be either None, a task ID, or an Iterable of IDs, "
f"but got {type(branch_task_ids).__name__!r}."
)
dag_run = ti.get_dagrun()
assert isinstance(dag_run, DagRun)
# TODO(potiuk): Handle TaskInstancePydantic case differently - we need to figure out the way to
# pass task that has been set in LocalTaskJob but in the way that TaskInstancePydantic definition
# does not attempt to serialize the field from/to ORM
task = ti.task # type: ignore[union-attr]
dag = task.dag
if TYPE_CHECKING:
assert dag
valid_task_ids = set(dag.task_ids)
invalid_task_ids = branch_task_id_set - valid_task_ids
if invalid_task_ids:
raise AirflowException(
"'branch_task_ids' must contain only valid task_ids. "
f"Invalid tasks found: {invalid_task_ids}."
)
downstream_tasks = _ensure_tasks(task.downstream_list)
if downstream_tasks:
# For a branching workflow that looks like this, when "branch" does skip_all_except("task1"),
# we intuitively expect both "task1" and "join" to execute even though strictly speaking,
# "join" is also immediately downstream of "branch" and should have been skipped. Therefore,
# we need a special case here for such empty branches: Check downstream tasks of branch_task_ids.
# In case the task to skip is also downstream of branch_task_ids, we add it to branch_task_ids and
# exclude it from skipping.
#
# branch -----> join
# \ ^
# v /
# task1
#
for branch_task_id in list(branch_task_id_set):
branch_task_id_set.update(dag.get_task(branch_task_id).get_flat_relative_ids(upstream=False))
skip_tasks = [
(t.task_id, downstream_ti.map_index)
for t in downstream_tasks
if (downstream_ti := dag_run.get_task_instance(t.task_id, map_index=ti.map_index))
and t.task_id not in branch_task_id_set
]
follow_task_ids = [t.task_id for t in downstream_tasks if t.task_id in branch_task_id_set]
self.log.info("Skipping tasks %s", skip_tasks)
with create_session() as session:
self._set_state_to_skipped(dag_run, skip_tasks, session=session)
# For some reason, session.commit() needs to happen before xcom_push.
# Otherwise the session is not committed.
session.commit()
ti.xcom_push(key=XCOM_SKIPMIXIN_KEY, value={XCOM_SKIPMIXIN_FOLLOWED: follow_task_ids})
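# Usage sketch (illustrative DAG): SkipMixin is what branching operators such
# as BranchPythonOperator build on. In a DAG shaped like
#     branch >> [task1, task2] >> join
# a branch callable returning "task1" leads to skip_all_except(ti, "task1"),
# which marks task2 as SKIPPED and stores {"followed": ["task1"]} under the
# "skipmixin_key" XCom so NotPreviouslySkippedDep can re-skip it after a clear.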
| 9,955 | 39.636735 | 110 |
py
|
airflow
|
airflow-main/airflow/models/connection.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import logging
import warnings
from json import JSONDecodeError
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlsplit
from sqlalchemy import Boolean, Column, Integer, String, Text
from sqlalchemy.orm import declared_attr, reconstructor, synonym
from airflow.configuration import ensure_secrets_loaded
from airflow.exceptions import AirflowException, AirflowNotFoundException, RemovedInAirflow3Warning
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
def parse_netloc_to_hostname(*args, **kwargs):
"""This method is deprecated."""
warnings.warn("This method is deprecated.", RemovedInAirflow3Warning)
return _parse_netloc_to_hostname(*args, **kwargs)
# Python automatically converts all letters to lowercase in hostname
# See: https://issues.apache.org/jira/browse/AIRFLOW-3615
def _parse_netloc_to_hostname(uri_parts):
"""Parse a URI string to get correct Hostname."""
hostname = unquote(uri_parts.hostname or "")
if "/" in hostname:
hostname = uri_parts.netloc
if "@" in hostname:
hostname = hostname.rsplit("@", 1)[1]
if ":" in hostname:
hostname = hostname.split(":", 1)[0]
hostname = unquote(hostname)
return hostname
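# Behaviour sketch (illustrative URI): for the split parts of
# "postgresql://user:pass@db.example.com:5432/mydb" this helper returns
# "db.example.com". The extra branches handle hostnames that still contain "/"
# after unquoting (percent-encoded netlocs), stripping credentials and the
# port from the raw netloc before unquoting again.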
class Connection(Base, LoggingMixin):
"""
Placeholder to store connection information for different database
instances. The idea here is that scripts use references to
database instances (conn_id) instead of hard coding hostname, logins and
passwords when using operators or hooks.
.. seealso::
For more information on how to use this class, see: :doc:`/howto/connection`
:param conn_id: The connection ID.
:param conn_type: The connection type.
:param description: The connection description.
:param host: The host.
:param login: The login.
:param password: The password.
:param schema: The schema.
:param port: The port number.
:param extra: Extra metadata. Non-standard data such as private/SSH keys can be saved here. JSON
encoded object.
:param uri: URI address describing connection parameters.
"""
EXTRA_KEY = "__extra__"
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN), unique=True, nullable=False)
conn_type = Column(String(500), nullable=False)
description = Column(Text().with_variant(Text(5000), "mysql").with_variant(String(5000), "sqlite"))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
_password = Column("password", String(5000))
port = Column(Integer())
is_encrypted = Column(Boolean, unique=False, default=False)
is_extra_encrypted = Column(Boolean, unique=False, default=False)
_extra = Column("extra", Text())
def __init__(
self,
conn_id: str | None = None,
conn_type: str | None = None,
description: str | None = None,
host: str | None = None,
login: str | None = None,
password: str | None = None,
schema: str | None = None,
port: int | None = None,
extra: str | dict | None = None,
uri: str | None = None,
):
super().__init__()
self.conn_id = conn_id
self.description = description
if extra and not isinstance(extra, str):
extra = json.dumps(extra)
if uri and (conn_type or host or login or password or schema or port or extra):
raise AirflowException(
"You must create an object using the URI or individual values "
"(conn_type, host, login, password, schema, port or extra)."
"You can't mix these two ways to create this object."
)
if uri:
self._parse_from_uri(uri)
else:
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
self.extra = extra
if self.extra:
self._validate_extra(self.extra, self.conn_id)
if self.password:
mask_secret(self.password)
@staticmethod
def _validate_extra(extra, conn_id) -> None:
"""
Here we verify that ``extra`` is a JSON-encoded Python dict. From Airflow 3.0, we should no
longer suppress these errors but raise instead.
"""
if extra is None:
return None
try:
extra_parsed = json.loads(extra)
if not isinstance(extra_parsed, dict):
warnings.warn(
"Encountered JSON value in `extra` which does not parse as a dictionary in "
f"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON "
"representation of a Python dict.",
RemovedInAirflow3Warning,
stacklevel=3,
)
except json.JSONDecodeError:
warnings.warn(
f"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for "
"non-JSON `extra` will be removed in Airflow 3.0",
RemovedInAirflow3Warning,
stacklevel=2,
)
return None
@reconstructor
def on_db_load(self):
if self.password:
mask_secret(self.password)
def parse_from_uri(self, **uri):
"""This method is deprecated. Please use uri parameter in constructor."""
warnings.warn(
"This method is deprecated. Please use uri parameter in constructor.",
RemovedInAirflow3Warning,
)
self._parse_from_uri(**uri)
@staticmethod
def _normalize_conn_type(conn_type):
if conn_type == "postgresql":
conn_type = "postgres"
elif "-" in conn_type:
conn_type = conn_type.replace("-", "_")
return conn_type
def _parse_from_uri(self, uri: str):
schemes_count_in_uri = uri.count("://")
if schemes_count_in_uri > 2:
raise AirflowException(f"Invalid connection string: {uri}.")
host_with_protocol = schemes_count_in_uri == 2
uri_parts = urlsplit(uri)
conn_type = uri_parts.scheme
self.conn_type = self._normalize_conn_type(conn_type)
rest_of_the_url = uri.replace(f"{conn_type}://", ("" if host_with_protocol else "//"))
if host_with_protocol:
uri_splits = rest_of_the_url.split("://", 1)
if "@" in uri_splits[0] or ":" in uri_splits[0]:
raise AirflowException(f"Invalid connection string: {uri}.")
uri_parts = urlsplit(rest_of_the_url)
protocol = uri_parts.scheme if host_with_protocol else None
host = _parse_netloc_to_hostname(uri_parts)
self.host = self._create_host(protocol, host)
quoted_schema = uri_parts.path[1:]
self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
self.login = unquote(uri_parts.username) if uri_parts.username else uri_parts.username
self.password = unquote(uri_parts.password) if uri_parts.password else uri_parts.password
self.port = uri_parts.port
if uri_parts.query:
query = dict(parse_qsl(uri_parts.query, keep_blank_values=True))
if self.EXTRA_KEY in query:
self.extra = query[self.EXTRA_KEY]
else:
self.extra = json.dumps(query)
@staticmethod
def _create_host(protocol, host) -> str | None:
"""Returns the connection host with the protocol."""
if not host:
return host
if protocol:
return f"{protocol}://{host}"
return host
def get_uri(self) -> str:
"""Return connection in URI format."""
if self.conn_type and "_" in self.conn_type:
self.log.warning(
"Connection schemes (type: %s) shall not contain '_' according to RFC3986.",
self.conn_type,
)
if self.conn_type:
uri = f"{self.conn_type.lower().replace('_', '-')}://"
else:
uri = "//"
if self.host and "://" in self.host:
protocol, host = self.host.split("://", 1)
else:
protocol, host = None, self.host
if protocol:
uri += f"{protocol}://"
authority_block = ""
if self.login is not None:
authority_block += quote(self.login, safe="")
if self.password is not None:
authority_block += ":" + quote(self.password, safe="")
        if authority_block:
authority_block += "@"
uri += authority_block
host_block = ""
if host:
host_block += quote(host, safe="")
if self.port:
if host_block == "" and authority_block == "":
host_block += f"@:{self.port}"
else:
host_block += f":{self.port}"
if self.schema:
host_block += f"/{quote(self.schema, safe='')}"
uri += host_block
if self.extra:
try:
query: str | None = urlencode(self.extra_dejson)
except TypeError:
query = None
if query and self.extra_dejson == dict(parse_qsl(query, keep_blank_values=True)):
uri += ("?" if self.schema else "/?") + query
else:
uri += ("?" if self.schema else "/?") + urlencode({self.EXTRA_KEY: self.extra})
return uri
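    # --- Illustrative sketch (not part of the original class) ---
    # Hedged example of the URI round-trip implemented by _parse_from_uri() and get_uri();
    # the conn_id, credentials, host and extra below are invented for illustration only.
    @staticmethod
    def _example_uri_roundtrip() -> None:
        conn = Connection(
            conn_id="example_postgres",
            uri="postgres://user:p%40ss@db.example.com:5432/mydb?sslmode=require",
        )
        assert conn.conn_type == "postgres"
        assert conn.password == "p@ss"
        assert conn.extra_dejson == {"sslmode": "require"}
        assert conn.get_uri() == "postgres://user:p%40ss@db.example.com:5432/mydb?sslmode=require"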
def get_password(self) -> str | None:
"""Return encrypted password."""
if self._password and self.is_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
f"Can't decrypt encrypted password for login={self.login} "
f"FERNET_KEY configuration is missing"
)
return fernet.decrypt(bytes(self._password, "utf-8")).decode()
else:
return self._password
def set_password(self, value: str | None):
"""Encrypt password and set in object attribute."""
if value:
fernet = get_fernet()
self._password = fernet.encrypt(bytes(value, "utf-8")).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def password(cls):
"""Password. The value is decrypted/encrypted when reading/setting the value."""
return synonym("_password", descriptor=property(cls.get_password, cls.set_password))
def get_extra(self) -> str:
"""Return encrypted extra-data."""
if self._extra and self.is_extra_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
f"Can't decrypt `extra` params for login={self.login}, "
f"FERNET_KEY configuration is missing"
)
extra_val = fernet.decrypt(bytes(self._extra, "utf-8")).decode()
else:
extra_val = self._extra
if extra_val:
self._validate_extra(extra_val, self.conn_id)
return extra_val
def set_extra(self, value: str):
"""Encrypt extra-data and save in object attribute to object."""
if value:
self._validate_extra(value, self.conn_id)
fernet = get_fernet()
self._extra = fernet.encrypt(bytes(value, "utf-8")).decode()
self.is_extra_encrypted = fernet.is_encrypted
else:
self._extra = value
self.is_extra_encrypted = False
@declared_attr
def extra(cls):
"""Extra data. The value is decrypted/encrypted when reading/setting the value."""
return synonym("_extra", descriptor=property(cls.get_extra, cls.set_extra))
def rotate_fernet_key(self):
"""Encrypts data with a new key. See: :ref:`security/fernet`."""
fernet = get_fernet()
if self._password and self.is_encrypted:
self._password = fernet.rotate(self._password.encode("utf-8")).decode()
if self._extra and self.is_extra_encrypted:
self._extra = fernet.rotate(self._extra.encode("utf-8")).decode()
def get_hook(self, *, hook_params=None):
"""Return hook based on conn_type."""
from airflow.providers_manager import ProvidersManager
hook = ProvidersManager().hooks.get(self.conn_type, None)
if hook is None:
raise AirflowException(f'Unknown hook type "{self.conn_type}"')
try:
hook_class = import_string(hook.hook_class_name)
except ImportError:
            warnings.warn(
                f"Could not import {hook.hook_class_name} when discovering "
                f"{hook.hook_name} {hook.package_name}",
            )
raise
if hook_params is None:
hook_params = {}
return hook_class(**{hook.connection_id_attribute_name: self.conn_id}, **hook_params)
def __repr__(self):
return self.conn_id or ""
def log_info(self):
"""
This method is deprecated. You can read each field individually or use the
default representation (`__repr__`).
"""
warnings.warn(
"This method is deprecated. You can read each field individually or "
"use the default representation (__repr__).",
RemovedInAirflow3Warning,
stacklevel=2,
)
return (
f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
f"extra: {'XXXXXXXX' if self.extra_dejson else None}"
)
def debug_info(self):
"""
This method is deprecated. You can read each field individually or use the
default representation (`__repr__`).
"""
warnings.warn(
"This method is deprecated. You can read each field individually or "
"use the default representation (__repr__).",
RemovedInAirflow3Warning,
stacklevel=2,
)
return (
f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
f"extra: {self.extra_dejson}"
)
def test_connection(self):
"""Calls out get_hook method and executes test_connection method on that."""
status, message = False, ""
try:
hook = self.get_hook()
if getattr(hook, "test_connection", False):
status, message = hook.test_connection()
else:
message = (
f"Hook {hook.__class__.__name__} doesn't implement or inherit test_connection method"
)
except Exception as e:
message = str(e)
return status, message
@property
def extra_dejson(self) -> dict:
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except JSONDecodeError:
self.log.exception("Failed parsing the json for conn_id %s", self.conn_id)
# Mask sensitive keys from this list
mask_secret(obj)
return obj
@classmethod
def get_connection_from_secrets(cls, conn_id: str) -> Connection:
"""
Get connection by conn_id.
:param conn_id: connection id
:return: connection
"""
for secrets_backend in ensure_secrets_loaded():
try:
conn = secrets_backend.get_connection(conn_id=conn_id)
if conn:
return conn
except Exception:
log.exception(
"Unable to retrieve connection from secrets backend (%s). "
"Checking subsequent secrets backend.",
type(secrets_backend).__name__,
)
raise AirflowNotFoundException(f"The conn_id `{conn_id}` isn't defined")
@classmethod
def from_json(cls, value, conn_id=None) -> Connection:
kwargs = json.loads(value)
extra = kwargs.pop("extra", None)
if extra:
kwargs["extra"] = extra if isinstance(extra, str) else json.dumps(extra)
conn_type = kwargs.pop("conn_type", None)
if conn_type:
kwargs["conn_type"] = cls._normalize_conn_type(conn_type)
port = kwargs.pop("port", None)
if port:
try:
kwargs["port"] = int(port)
except ValueError:
raise ValueError(f"Expected integer value for `port`, but got {port!r} instead.")
return Connection(conn_id=conn_id, **kwargs)
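# --- Illustrative sketch (not part of the original module) ---
# Hedged example of Connection.from_json() above; the JSON payload, conn_id and host
# are invented for illustration only.
def _example_connection_from_json() -> Connection:
    payload = json.dumps(
        {
            "conn_type": "postgresql",  # normalized to "postgres"
            "host": "db.example.com",
            "login": "user",
            "port": "5432",  # string ports are coerced to int
            "extra": {"sslmode": "require"},  # dict extras are re-serialized to JSON
        }
    )
    conn = Connection.from_json(payload, conn_id="example_json_conn")
    assert conn.conn_type == "postgres"
    assert conn.port == 5432
    assert conn.extra_dejson == {"sslmode": "require"}
    return conn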
| 18,148 | 36.731809 | 105 |
py
|
airflow
|
airflow-main/airflow/models/taskfail.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Taskfail tracks the failed run durations of each task instance."""
from __future__ import annotations
from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, text
from sqlalchemy.orm import relationship
from airflow.models.base import Base, StringID
from airflow.utils.sqlalchemy import UtcDateTime
class TaskFail(Base):
"""TaskFail tracks the failed run durations of each task instance."""
__tablename__ = "task_fail"
id = Column(Integer, primary_key=True)
task_id = Column(StringID(), nullable=False)
dag_id = Column(StringID(), nullable=False)
run_id = Column(StringID(), nullable=False)
map_index = Column(Integer, nullable=False, server_default=text("-1"))
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Integer)
__table_args__ = (
Index("idx_task_fail_task_instance", dag_id, task_id, run_id, map_index),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="task_fail_ti_fkey",
ondelete="CASCADE",
),
)
# We don't need a DB level FK here, as we already have that to TI (which has one to DR) but by defining
# the relationship we can more easily find the execution date for these rows
dag_run = relationship(
"DagRun",
primaryjoin="""and_(
TaskFail.dag_id == foreign(DagRun.dag_id),
TaskFail.run_id == foreign(DagRun.run_id),
)""",
viewonly=True,
)
def __init__(self, ti):
self.dag_id = ti.dag_id
self.task_id = ti.task_id
self.run_id = ti.run_id
self.map_index = ti.map_index
self.start_date = ti.start_date
self.end_date = ti.end_date
if self.end_date and self.start_date:
self.duration = int((self.end_date - self.start_date).total_seconds())
else:
self.duration = None
def __repr__(self):
prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.task_id} {self.run_id}"
if self.map_index != -1:
prefix += f" map_index={self.map_index}"
return prefix + ">"
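# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the duration computed in __init__ above; a stand-in object with the
# attributes TaskFail reads is enough, no real TaskInstance or database is required.
def _example_taskfail_duration() -> None:
    from datetime import datetime, timedelta, timezone
    from types import SimpleNamespace

    start = datetime(2023, 1, 1, tzinfo=timezone.utc)
    fake_ti = SimpleNamespace(
        dag_id="example_dag",
        task_id="example_task",
        run_id="manual__2023-01-01",
        map_index=-1,
        start_date=start,
        end_date=start + timedelta(minutes=2, seconds=30),
    )
    assert TaskFail(fake_ti).duration == 150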
| 3,133 | 35.870588 | 107 |
py
|
airflow
|
airflow-main/airflow/models/mappedoperator.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections
import collections.abc
import contextlib
import copy
import datetime
import warnings
from typing import TYPE_CHECKING, Any, ClassVar, Collection, Iterable, Iterator, Mapping, Sequence, Union
import attr
import pendulum
from sqlalchemy.orm.session import Session
from airflow import settings
from airflow.compat.functools import cache
from airflow.exceptions import AirflowException, UnmappableOperator
from airflow.models.abstractoperator import (
DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST,
DEFAULT_OWNER,
DEFAULT_POOL_SLOTS,
DEFAULT_PRIORITY_WEIGHT,
DEFAULT_QUEUE,
DEFAULT_RETRIES,
DEFAULT_RETRY_DELAY,
DEFAULT_TRIGGER_RULE,
DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING,
DEFAULT_WEIGHT_RULE,
AbstractOperator,
NotMapped,
TaskStateChangeCallback,
)
from airflow.models.expandinput import (
DictOfListsExpandInput,
ExpandInput,
ListOfDictsExpandInput,
OperatorExpandArgument,
OperatorExpandKwargsArgument,
is_mappable,
)
from airflow.models.param import ParamsDict
from airflow.models.pool import Pool
from airflow.serialization.enums import DagAttributeTypes
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.ti_deps.deps.mapped_task_expanded import MappedTaskIsExpanded
from airflow.typing_compat import Literal
from airflow.utils.context import Context, context_update_for_unmapped
from airflow.utils.helpers import is_container, prevent_duplicates
from airflow.utils.operator_resources import Resources
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET
from airflow.utils.xcom import XCOM_RETURN_KEY
if TYPE_CHECKING:
import jinja2 # Slow import.
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
from airflow.models.operator import Operator
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
ValidationSource = Union[Literal["expand"], Literal["partial"]]
def validate_mapping_kwargs(op: type[BaseOperator], func: ValidationSource, value: dict[str, Any]) -> None:
# use a dict so order of args is same as code order
unknown_args = value.copy()
for klass in op.mro():
init = klass.__init__ # type: ignore[misc]
try:
param_names = init._BaseOperatorMeta__param_names
except AttributeError:
continue
for name in param_names:
value = unknown_args.pop(name, NOTSET)
if func != "expand":
continue
if value is NOTSET:
continue
if is_mappable(value):
continue
type_name = type(value).__name__
error = f"{op.__name__}.expand() got an unexpected type {type_name!r} for keyword argument {name}"
raise ValueError(error)
if not unknown_args:
return # If we have no args left to check: stop looking at the MRO chain.
if len(unknown_args) == 1:
error = f"an unexpected keyword argument {unknown_args.popitem()[0]!r}"
else:
names = ", ".join(repr(n) for n in unknown_args)
error = f"unexpected keyword arguments {names}"
raise TypeError(f"{op.__name__}.{func}() got {error}")
def ensure_xcomarg_return_value(arg: Any) -> None:
from airflow.models.xcom_arg import XComArg
if isinstance(arg, XComArg):
for operator, key in arg.iter_references():
if key != XCOM_RETURN_KEY:
raise ValueError(f"cannot map over XCom with custom key {key!r} from {operator}")
elif not is_container(arg):
return
elif isinstance(arg, collections.abc.Mapping):
for v in arg.values():
ensure_xcomarg_return_value(v)
elif isinstance(arg, collections.abc.Iterable):
for v in arg:
ensure_xcomarg_return_value(v)
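# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the validation helpers above: a plain string is not a mappable
# value, so expand() rejects it at parse time. BashOperator is the stock Airflow
# operator; the task_id and command are invented for illustration only.
def _example_expand_rejects_plain_string() -> None:
    from airflow.operators.bash import BashOperator

    try:
        BashOperator.partial(task_id="bad_expand_example").expand(bash_command="echo 1")
    except ValueError as err:
        assert "unexpected type 'str'" in str(err)
    else:
        raise AssertionError("expand() should reject a non-mappable plain string")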
@attr.define(kw_only=True, repr=False)
class OperatorPartial:
"""An "intermediate state" returned by ``BaseOperator.partial()``.
This only exists at DAG-parsing time; the only intended usage is for the
user to call ``.expand()`` on it at some point (usually in a method chain) to
create a ``MappedOperator`` to add into the DAG.
"""
operator_class: type[BaseOperator]
kwargs: dict[str, Any]
params: ParamsDict | dict
_expand_called: bool = False # Set when expand() is called to ease user debugging.
def __attrs_post_init__(self):
from airflow.operators.subdag import SubDagOperator
if issubclass(self.operator_class, SubDagOperator):
raise TypeError("Mapping over deprecated SubDagOperator is not supported")
validate_mapping_kwargs(self.operator_class, "partial", self.kwargs)
def __repr__(self) -> str:
args = ", ".join(f"{k}={v!r}" for k, v in self.kwargs.items())
return f"{self.operator_class.__name__}.partial({args})"
def __del__(self):
if not self._expand_called:
try:
task_id = repr(self.kwargs["task_id"])
except KeyError:
task_id = f"at {hex(id(self))}"
warnings.warn(f"Task {task_id} was never mapped!")
def expand(self, **mapped_kwargs: OperatorExpandArgument) -> MappedOperator:
if not mapped_kwargs:
raise TypeError("no arguments to expand against")
validate_mapping_kwargs(self.operator_class, "expand", mapped_kwargs)
prevent_duplicates(self.kwargs, mapped_kwargs, fail_reason="unmappable or already specified")
# Since the input is already checked at parse time, we can set strict
# to False to skip the checks on execution.
return self._expand(DictOfListsExpandInput(mapped_kwargs), strict=False)
def expand_kwargs(self, kwargs: OperatorExpandKwargsArgument, *, strict: bool = True) -> MappedOperator:
from airflow.models.xcom_arg import XComArg
if isinstance(kwargs, collections.abc.Sequence):
for item in kwargs:
if not isinstance(item, (XComArg, collections.abc.Mapping)):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
elif not isinstance(kwargs, XComArg):
raise TypeError(f"expected XComArg or list[dict], not {type(kwargs).__name__}")
return self._expand(ListOfDictsExpandInput(kwargs), strict=strict)
def _expand(self, expand_input: ExpandInput, *, strict: bool) -> MappedOperator:
from airflow.operators.empty import EmptyOperator
self._expand_called = True
ensure_xcomarg_return_value(expand_input.value)
partial_kwargs = self.kwargs.copy()
task_id = partial_kwargs.pop("task_id")
dag = partial_kwargs.pop("dag")
task_group = partial_kwargs.pop("task_group")
start_date = partial_kwargs.pop("start_date")
end_date = partial_kwargs.pop("end_date")
try:
operator_name = self.operator_class.custom_operator_name # type: ignore
except AttributeError:
operator_name = self.operator_class.__name__
op = MappedOperator(
operator_class=self.operator_class,
expand_input=expand_input,
partial_kwargs=partial_kwargs,
task_id=task_id,
params=self.params,
deps=MappedOperator.deps_for(self.operator_class),
operator_extra_links=self.operator_class.operator_extra_links,
template_ext=self.operator_class.template_ext,
template_fields=self.operator_class.template_fields,
template_fields_renderers=self.operator_class.template_fields_renderers,
ui_color=self.operator_class.ui_color,
ui_fgcolor=self.operator_class.ui_fgcolor,
is_empty=issubclass(self.operator_class, EmptyOperator),
task_module=self.operator_class.__module__,
task_type=self.operator_class.__name__,
operator_name=operator_name,
dag=dag,
task_group=task_group,
start_date=start_date,
end_date=end_date,
disallow_kwargs_override=strict,
# For classic operators, this points to expand_input because kwargs
# to BaseOperator.expand() contribute to operator arguments.
expand_input_attr="expand_input",
)
return op
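# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the partial() -> expand() chain that OperatorPartial implements;
# BashOperator is the stock Airflow operator, while the dag_id, task_id and commands
# are invented for illustration only.
def _example_partial_expand() -> MappedOperator:
    import pendulum

    from airflow.models.dag import DAG
    from airflow.operators.bash import BashOperator

    with DAG(dag_id="mapping_example", start_date=pendulum.datetime(2023, 1, 1), schedule=None):
        return BashOperator.partial(task_id="echo").expand(
            bash_command=["echo 1", "echo 2", "echo 3"]
        )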
@attr.define(
kw_only=True,
# Disable custom __getstate__ and __setstate__ generation since it interacts
# badly with Airflow's DAG serialization and pickling. When a mapped task is
# deserialized, subclasses are coerced into MappedOperator, but when it goes
# through DAG pickling, all attributes defined in the subclasses are dropped
# by attrs's custom state management. Since attrs does not do anything too
# special here (the logic is only important for slots=True), we use Python's
# built-in implementation, which works (as proven by good old BaseOperator).
getstate_setstate=False,
)
class MappedOperator(AbstractOperator):
"""Object representing a mapped operator in a DAG."""
# This attribute serves double purpose. For a "normal" operator instance
# loaded from DAG, this holds the underlying non-mapped operator class that
# can be used to create an unmapped operator for execution. For an operator
# recreated from a serialized DAG, however, this holds the serialized data
# that can be used to unmap this into a SerializedBaseOperator.
operator_class: type[BaseOperator] | dict[str, Any]
expand_input: ExpandInput
partial_kwargs: dict[str, Any]
# Needed for serialization.
task_id: str
params: ParamsDict | dict
deps: frozenset[BaseTIDep]
operator_extra_links: Collection[BaseOperatorLink]
template_ext: Sequence[str]
template_fields: Collection[str]
template_fields_renderers: dict[str, str]
ui_color: str
ui_fgcolor: str
_is_empty: bool
_task_module: str
_task_type: str
_operator_name: str
dag: DAG | None
task_group: TaskGroup | None
start_date: pendulum.DateTime | None
end_date: pendulum.DateTime | None
upstream_task_ids: set[str] = attr.ib(factory=set, init=False)
downstream_task_ids: set[str] = attr.ib(factory=set, init=False)
_disallow_kwargs_override: bool
"""Whether execution fails if ``expand_input`` has duplicates to ``partial_kwargs``.
If *False*, values from ``expand_input`` under duplicate keys override those
under corresponding keys in ``partial_kwargs``.
"""
_expand_input_attr: str
"""Where to get kwargs to calculate expansion length against.
This should be a name to call ``getattr()`` on.
"""
subdag: None = None # Since we don't support SubDagOperator, this is always None.
supports_lineage: bool = False
HIDE_ATTRS_FROM_UI: ClassVar[frozenset[str]] = AbstractOperator.HIDE_ATTRS_FROM_UI | frozenset(
(
"parse_time_mapped_ti_count",
"operator_class",
)
)
def __hash__(self):
return id(self)
def __repr__(self):
return f"<Mapped({self._task_type}): {self.task_id}>"
def __attrs_post_init__(self):
from airflow.models.xcom_arg import XComArg
if self.get_closest_mapped_task_group() is not None:
raise NotImplementedError("operator expansion in an expanded task group is not yet supported")
if self.task_group:
self.task_group.add(self)
if self.dag:
self.dag.add_task(self)
XComArg.apply_upstream_relationship(self, self.expand_input.value)
for k, v in self.partial_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
if self.partial_kwargs.get("sla") is not None:
raise AirflowException(
f"SLAs are unsupported with mapped tasks. Please set `sla=None` for task "
f"{self.task_id!r}."
)
@AbstractOperator.is_setup.setter # type: ignore[attr-defined]
def is_setup(self, value):
"""
Setter for is_setup property. Disabled for MappedOperator.
:meta private:
"""
raise ValueError("Cannot set is_setup for mapped operator.")
@AbstractOperator.is_teardown.setter # type: ignore[attr-defined]
def is_teardown(self, value):
"""
Setter for is_teardown property. Disabled for MappedOperator.
:meta private:
"""
raise ValueError("Cannot set is_teardown for mapped operator.")
@classmethod
@cache
def get_serialized_fields(cls):
# Not using 'cls' here since we only want to serialize base fields.
return frozenset(attr.fields_dict(MappedOperator)) - {
"dag",
"deps",
"expand_input", # This is needed to be able to accept XComArg.
"subdag",
"task_group",
"upstream_task_ids",
"supports_lineage",
"is_setup",
"is_teardown",
"on_failure_fail_dagrun",
}
@staticmethod
@cache
def deps_for(operator_class: type[BaseOperator]) -> frozenset[BaseTIDep]:
operator_deps = operator_class.deps
if not isinstance(operator_deps, collections.abc.Set):
raise UnmappableOperator(
f"'deps' must be a set defined as a class-level variable on {operator_class.__name__}, "
f"not a {type(operator_deps).__name__}"
)
return operator_deps | {MappedTaskIsExpanded()}
@property
def task_type(self) -> str:
"""Implementing Operator."""
return self._task_type
@property
def operator_name(self) -> str:
return self._operator_name
@property
def inherits_from_empty_operator(self) -> bool:
"""Implementing Operator."""
return self._is_empty
@property
def roots(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def leaves(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def owner(self) -> str: # type: ignore[override]
return self.partial_kwargs.get("owner", DEFAULT_OWNER)
@property
def email(self) -> None | str | Iterable[str]:
return self.partial_kwargs.get("email")
@property
def trigger_rule(self) -> TriggerRule:
return self.partial_kwargs.get("trigger_rule", DEFAULT_TRIGGER_RULE)
@trigger_rule.setter
def trigger_rule(self, value):
# required for mypy which complains about overriding writeable attr with read-only property
raise ValueError("Cannot set trigger_rule for mapped operator.")
@property
def depends_on_past(self) -> bool:
return bool(self.partial_kwargs.get("depends_on_past"))
@property
def ignore_first_depends_on_past(self) -> bool:
value = self.partial_kwargs.get("ignore_first_depends_on_past", DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST)
return bool(value)
@property
def wait_for_past_depends_before_skipping(self) -> bool:
value = self.partial_kwargs.get(
"wait_for_past_depends_before_skipping", DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING
)
return bool(value)
@property
def wait_for_downstream(self) -> bool:
return bool(self.partial_kwargs.get("wait_for_downstream"))
@property
def retries(self) -> int | None:
return self.partial_kwargs.get("retries", DEFAULT_RETRIES)
@property
def queue(self) -> str:
return self.partial_kwargs.get("queue", DEFAULT_QUEUE)
@property
def pool(self) -> str:
return self.partial_kwargs.get("pool", Pool.DEFAULT_POOL_NAME)
@property
    def pool_slots(self) -> int | None:
return self.partial_kwargs.get("pool_slots", DEFAULT_POOL_SLOTS)
@property
def execution_timeout(self) -> datetime.timedelta | None:
return self.partial_kwargs.get("execution_timeout")
@property
def max_retry_delay(self) -> datetime.timedelta | None:
return self.partial_kwargs.get("max_retry_delay")
@property
def retry_delay(self) -> datetime.timedelta:
return self.partial_kwargs.get("retry_delay", DEFAULT_RETRY_DELAY)
@property
def retry_exponential_backoff(self) -> bool:
return bool(self.partial_kwargs.get("retry_exponential_backoff"))
@property
def priority_weight(self) -> int: # type: ignore[override]
return self.partial_kwargs.get("priority_weight", DEFAULT_PRIORITY_WEIGHT)
@property
def weight_rule(self) -> int: # type: ignore[override]
return self.partial_kwargs.get("weight_rule", DEFAULT_WEIGHT_RULE)
@property
def sla(self) -> datetime.timedelta | None:
return self.partial_kwargs.get("sla")
@property
def max_active_tis_per_dag(self) -> int | None:
return self.partial_kwargs.get("max_active_tis_per_dag")
@property
def max_active_tis_per_dagrun(self) -> int | None:
return self.partial_kwargs.get("max_active_tis_per_dagrun")
@property
def resources(self) -> Resources | None:
return self.partial_kwargs.get("resources")
@property
def on_execute_callback(self) -> None | TaskStateChangeCallback | list[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_execute_callback")
@on_execute_callback.setter
def on_execute_callback(self, value: TaskStateChangeCallback | None) -> None:
self.partial_kwargs["on_execute_callback"] = value
@property
def on_failure_callback(self) -> None | TaskStateChangeCallback | list[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_failure_callback")
@on_failure_callback.setter
def on_failure_callback(self, value: TaskStateChangeCallback | None) -> None:
self.partial_kwargs["on_failure_callback"] = value
@property
def on_retry_callback(self) -> None | TaskStateChangeCallback | list[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_retry_callback")
@on_retry_callback.setter
def on_retry_callback(self, value: TaskStateChangeCallback | None) -> None:
self.partial_kwargs["on_retry_callback"] = value
@property
def on_success_callback(self) -> None | TaskStateChangeCallback | list[TaskStateChangeCallback]:
return self.partial_kwargs.get("on_success_callback")
@on_success_callback.setter
def on_success_callback(self, value: TaskStateChangeCallback | None) -> None:
self.partial_kwargs["on_success_callback"] = value
@property
def run_as_user(self) -> str | None:
return self.partial_kwargs.get("run_as_user")
@property
def executor_config(self) -> dict:
return self.partial_kwargs.get("executor_config", {})
@property # type: ignore[override]
def inlets(self) -> list[Any]: # type: ignore[override]
return self.partial_kwargs.get("inlets", [])
@inlets.setter
def inlets(self, value: list[Any]) -> None: # type: ignore[override]
self.partial_kwargs["inlets"] = value
@property # type: ignore[override]
def outlets(self) -> list[Any]: # type: ignore[override]
return self.partial_kwargs.get("outlets", [])
@outlets.setter
def outlets(self, value: list[Any]) -> None: # type: ignore[override]
self.partial_kwargs["outlets"] = value
@property
def doc(self) -> str | None:
return self.partial_kwargs.get("doc")
@property
def doc_md(self) -> str | None:
return self.partial_kwargs.get("doc_md")
@property
def doc_json(self) -> str | None:
return self.partial_kwargs.get("doc_json")
@property
def doc_yaml(self) -> str | None:
return self.partial_kwargs.get("doc_yaml")
@property
def doc_rst(self) -> str | None:
return self.partial_kwargs.get("doc_rst")
def get_dag(self) -> DAG | None:
"""Implementing Operator."""
return self.dag
@property
def output(self) -> XComArg:
"""Returns reference to XCom pushed by current operator."""
from airflow.models.xcom_arg import XComArg
return XComArg(operator=self)
def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
"""Implementing DAGNode."""
return DagAttributeTypes.OP, self.task_id
def _expand_mapped_kwargs(self, context: Context, session: Session) -> tuple[Mapping[str, Any], set[int]]:
"""Get the kwargs to create the unmapped operator.
This exists because taskflow operators expand against op_kwargs, not the
entire operator kwargs dict.
"""
return self._get_specified_expand_input().resolve(context, session)
def _get_unmap_kwargs(self, mapped_kwargs: Mapping[str, Any], *, strict: bool) -> dict[str, Any]:
"""Get init kwargs to unmap the underlying operator class.
:param mapped_kwargs: The dict returned by ``_expand_mapped_kwargs``.
"""
if strict:
prevent_duplicates(
self.partial_kwargs,
mapped_kwargs,
fail_reason="unmappable or already specified",
)
# If params appears in the mapped kwargs, we need to merge it into the
# partial params, overriding existing keys.
params = copy.copy(self.params)
with contextlib.suppress(KeyError):
params.update(mapped_kwargs["params"])
# Ordering is significant; mapped kwargs should override partial ones,
# and the specially handled params should be respected.
return {
"task_id": self.task_id,
"dag": self.dag,
"task_group": self.task_group,
"start_date": self.start_date,
"end_date": self.end_date,
**self.partial_kwargs,
**mapped_kwargs,
"params": params,
}
def unmap(self, resolve: None | Mapping[str, Any] | tuple[Context, Session]) -> BaseOperator:
"""Get the "normal" Operator after applying the current mapping.
The *resolve* argument is only used if ``operator_class`` is a real
class, i.e. if this operator is not serialized. If ``operator_class`` is
not a class (i.e. this DAG has been deserialized), this returns a
SerializedBaseOperator that "looks like" the actual unmapping result.
If *resolve* is a two-tuple (context, session), the information is used
to resolve the mapped arguments into init arguments. If it is a mapping,
no resolving happens, the mapping directly provides those init arguments
resolved from mapped kwargs.
:meta private:
"""
if isinstance(self.operator_class, type):
if isinstance(resolve, collections.abc.Mapping):
kwargs = resolve
elif resolve is not None:
kwargs, _ = self._expand_mapped_kwargs(*resolve)
else:
raise RuntimeError("cannot unmap a non-serialized operator without context")
kwargs = self._get_unmap_kwargs(kwargs, strict=self._disallow_kwargs_override)
op = self.operator_class(**kwargs, _airflow_from_mapped=True)
# We need to overwrite task_id here because BaseOperator further
# mangles the task_id based on the task hierarchy (namely, group_id
# is prepended, and '__N' appended to deduplicate). This is hacky,
# but better than duplicating the whole mangling logic.
op.task_id = self.task_id
return op
# After a mapped operator is serialized, there's no real way to actually
# unmap it since we've lost access to the underlying operator class.
# This tries its best to simply "forward" all the attributes on this
# mapped operator to a new SerializedBaseOperator instance.
from airflow.serialization.serialized_objects import SerializedBaseOperator
op = SerializedBaseOperator(task_id=self.task_id, params=self.params, _airflow_from_mapped=True)
SerializedBaseOperator.populate_operator(op, self.operator_class)
return op
def _get_specified_expand_input(self) -> ExpandInput:
"""Input received from the expand call on the operator."""
return getattr(self, self._expand_input_attr)
def prepare_for_execution(self) -> MappedOperator:
# Since a mapped operator cannot be used for execution, and an unmapped
# BaseOperator needs to be created later (see render_template_fields),
# we don't need to create a copy of the MappedOperator here.
return self
def iter_mapped_dependencies(self) -> Iterator[Operator]:
"""Upstream dependencies that provide XComs used by this task for task mapping."""
from airflow.models.xcom_arg import XComArg
for operator, _ in XComArg.iter_xcom_references(self._get_specified_expand_input()):
yield operator
@cache
def get_parse_time_mapped_ti_count(self) -> int:
current_count = self._get_specified_expand_input().get_parse_time_mapped_ti_count()
try:
parent_count = super().get_parse_time_mapped_ti_count()
except NotMapped:
return current_count
return parent_count * current_count
def get_mapped_ti_count(self, run_id: str, *, session: Session) -> int:
current_count = self._get_specified_expand_input().get_total_map_length(run_id, session=session)
try:
parent_count = super().get_mapped_ti_count(run_id, session=session)
except NotMapped:
return current_count
return parent_count * current_count
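    # --- Illustrative note (not part of the original class) ---
    # Hedged worked example of the multiplication above: a task expanded over 3 items
    # that sits inside a mapped task group expanded over 2 items yields
    # parent_count * current_count = 2 * 3 = 6 task instances for the run.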
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
"""Template all attributes listed in *self.template_fields*.
This updates *context* to reference the map-expanded task and relevant
information, without modifying the mapped operator. The expanded task
in *context* is then rendered in-place.
:param context: Context dict with values to apply on content.
:param jinja_env: Jinja environment to use for rendering.
"""
if not jinja_env:
jinja_env = self.get_template_env()
# Ideally we'd like to pass in session as an argument to this function,
# but we can't easily change this function signature since operators
# could override this. We can't use @provide_session since it closes and
# expunges everything, which we don't want to do when we are so "deep"
# in the weeds here. We don't close this session for the same reason.
session = settings.Session()
mapped_kwargs, seen_oids = self._expand_mapped_kwargs(context, session)
unmapped_task = self.unmap(mapped_kwargs)
context_update_for_unmapped(context, unmapped_task)
# Since the operators that extend `BaseOperator` are not subclasses of
# `MappedOperator`, we need to call `_do_render_template_fields` from
# the unmapped task in order to call the operator method when we override
# it to customize the parsing of nested fields.
unmapped_task._do_render_template_fields(
parent=unmapped_task,
template_fields=self.template_fields,
context=context,
jinja_env=jinja_env,
seen_oids=seen_oids,
session=session,
)
| 28,565 | 37.865306 | 110 |
py
|
airflow
|
airflow-main/airflow/models/trigger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
from traceback import format_exception
from typing import Any, Iterable
from sqlalchemy import Column, Integer, String, delete, func, or_
from sqlalchemy.orm import Session, joinedload, relationship
from airflow.api_internal.internal_api_call import internal_api_call
from airflow.models.base import Base
from airflow.models.taskinstance import TaskInstance
from airflow.triggers.base import BaseTrigger
from airflow.utils import timezone
from airflow.utils.retries import run_with_db_retries
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import ExtendedJSON, UtcDateTime, with_row_locks
from airflow.utils.state import TaskInstanceState
class Trigger(Base):
"""
Triggers are a workload that run in an asynchronous event loop shared with
other Triggers, and fire off events that will unpause deferred Tasks,
start linked DAGs, etc.
They are persisted into the database and then re-hydrated into a
"triggerer" process, where many are run at once. We model it so that
there is a many-to-one relationship between Task and Trigger, for future
deduplication logic to use.
Rows will be evicted from the database when the triggerer detects no
active Tasks/DAGs using them. Events are not stored in the database;
when an Event is fired, the triggerer will directly push its data to the
appropriate Task/DAG.
"""
__tablename__ = "trigger"
id = Column(Integer, primary_key=True)
classpath = Column(String(1000), nullable=False)
kwargs = Column(ExtendedJSON, nullable=False)
created_date = Column(UtcDateTime, nullable=False)
triggerer_id = Column(Integer, nullable=True)
triggerer_job = relationship(
"Job",
primaryjoin="Job.id == Trigger.triggerer_id",
foreign_keys=triggerer_id,
uselist=False,
)
task_instance = relationship("TaskInstance", back_populates="trigger", lazy="joined", uselist=False)
def __init__(
self,
classpath: str,
kwargs: dict[str, Any],
created_date: datetime.datetime | None = None,
) -> None:
super().__init__()
self.classpath = classpath
self.kwargs = kwargs
self.created_date = created_date or timezone.utcnow()
@classmethod
@internal_api_call
def from_object(cls, trigger: BaseTrigger) -> Trigger:
"""
Alternative constructor that creates a trigger row based directly
off of a Trigger object.
"""
classpath, kwargs = trigger.serialize()
return cls(classpath=classpath, kwargs=kwargs)
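    # --- Illustrative sketch (not part of the original class) ---
    # Hedged example of from_object(): DateTimeTrigger is a stock Airflow trigger whose
    # serialize() yields its classpath and kwargs; the 5-minute offset is invented.
    @staticmethod
    def _example_from_object() -> Trigger:
        from airflow.triggers.temporal import DateTimeTrigger

        moment = timezone.utcnow() + datetime.timedelta(minutes=5)
        row = Trigger.from_object(DateTimeTrigger(moment=moment))
        assert row.classpath == "airflow.triggers.temporal.DateTimeTrigger"
        assert row.kwargs == {"moment": moment}
        return row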
@classmethod
@internal_api_call
@provide_session
def bulk_fetch(cls, ids: Iterable[int], session: Session = NEW_SESSION) -> dict[int, Trigger]:
"""
Fetches all the Triggers by ID and returns a dict mapping
ID -> Trigger instance.
"""
query = (
session.query(cls)
.filter(cls.id.in_(ids))
.options(
joinedload("task_instance"),
joinedload("task_instance.trigger"),
joinedload("task_instance.trigger.triggerer_job"),
)
)
return {obj.id: obj for obj in query}
@classmethod
@internal_api_call
@provide_session
def clean_unused(cls, session: Session = NEW_SESSION) -> None:
"""Deletes all triggers that have no tasks dependent on them.
Triggers have a one-to-many relationship to task instances, so we need
to clean those up first. Afterwards we can drop the triggers not
referenced by anyone.
"""
# Update all task instances with trigger IDs that are not DEFERRED to remove them
for attempt in run_with_db_retries():
with attempt:
session.query(TaskInstance).filter(
TaskInstance.state != TaskInstanceState.DEFERRED, TaskInstance.trigger_id.isnot(None)
).update({TaskInstance.trigger_id: None})
# Get all triggers that have no task instances depending on them...
ids = [
trigger_id
for (trigger_id,) in (
session.query(cls.id)
.join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=True)
.group_by(cls.id)
.having(func.count(TaskInstance.trigger_id) == 0)
)
]
# ...and delete them (we can't do this in one query due to MySQL)
session.execute(
delete(Trigger).where(Trigger.id.in_(ids)).execution_options(synchronize_session=False)
)
@classmethod
@internal_api_call
@provide_session
def submit_event(cls, trigger_id, event, session: Session = NEW_SESSION) -> None:
"""
        Takes an event fired by the trigger with the given ID and resumes all
        task instances that were deferred on it.
"""
for task_instance in session.query(TaskInstance).filter(
TaskInstance.trigger_id == trigger_id, TaskInstance.state == TaskInstanceState.DEFERRED
):
# Add the event's payload into the kwargs for the task
next_kwargs = task_instance.next_kwargs or {}
next_kwargs["event"] = event.payload
task_instance.next_kwargs = next_kwargs
# Remove ourselves as its trigger
task_instance.trigger_id = None
# Finally, mark it as scheduled so it gets re-queued
task_instance.state = TaskInstanceState.SCHEDULED
@classmethod
@internal_api_call
@provide_session
def submit_failure(cls, trigger_id, exc=None, session: Session = NEW_SESSION) -> None:
"""
Called when a trigger has failed unexpectedly, and we need to mark
everything that depended on it as failed. Notably, we have to actually
run the failure code from a worker as it may have linked callbacks, so
hilariously we have to re-schedule the task instances to a worker just
so they can then fail.
We use a special __fail__ value for next_method to achieve this that
the runtime code understands as immediate-fail, and pack the error into
next_kwargs.
TODO: Once we have shifted callback (and email) handling to run on
workers as first-class concepts, we can run the failure code here
in-process, but we can't do that right now.
"""
for task_instance in session.query(TaskInstance).filter(
TaskInstance.trigger_id == trigger_id, TaskInstance.state == TaskInstanceState.DEFERRED
):
# Add the error and set the next_method to the fail state
traceback = format_exception(type(exc), exc, exc.__traceback__) if exc else None
task_instance.next_method = "__fail__"
task_instance.next_kwargs = {"error": "Trigger failure", "traceback": traceback}
# Remove ourselves as its trigger
task_instance.trigger_id = None
# Finally, mark it as scheduled so it gets re-queued
task_instance.state = TaskInstanceState.SCHEDULED
@classmethod
@internal_api_call
@provide_session
def ids_for_triggerer(cls, triggerer_id, session: Session = NEW_SESSION) -> list[int]:
"""Retrieves a list of triggerer_ids."""
return [row[0] for row in session.query(cls.id).filter(cls.triggerer_id == triggerer_id)]
@classmethod
@internal_api_call
@provide_session
def assign_unassigned(cls, triggerer_id, capacity, heartrate, session: Session = NEW_SESSION) -> None:
"""
Takes a triggerer_id, the capacity for that triggerer and the Triggerer job heartrate,
and assigns unassigned triggers until that capacity is reached, or there are no more
unassigned triggers.
"""
from airflow.jobs.job import Job # To avoid circular import
count = session.query(func.count(cls.id)).filter(cls.triggerer_id == triggerer_id).scalar()
capacity -= count
if capacity <= 0:
return
# we multiply heartrate by a grace_multiplier to give the triggerer
# a chance to heartbeat before we consider it dead
health_check_threshold = heartrate * 2.1
alive_triggerer_ids = [
row[0]
for row in session.query(Job.id).filter(
Job.end_date.is_(None),
Job.latest_heartbeat > timezone.utcnow() - datetime.timedelta(seconds=health_check_threshold),
Job.job_type == "TriggererJob",
)
]
# Find triggers who do NOT have an alive triggerer_id, and then assign
# up to `capacity` of those to us.
trigger_ids_query = cls.get_sorted_triggers(
capacity=capacity, alive_triggerer_ids=alive_triggerer_ids, session=session
)
if trigger_ids_query:
session.query(cls).filter(cls.id.in_([i.id for i in trigger_ids_query])).update(
{cls.triggerer_id: triggerer_id},
synchronize_session=False,
)
session.commit()
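    # --- Illustrative note (not part of the original class) ---
    # Hedged worked example of the logic above, using invented numbers: with a heartrate
    # of 5 seconds, health_check_threshold is 5 * 2.1 = 10.5 seconds, so a triggerer whose
    # last heartbeat is older than ~10.5s is treated as dead and its triggers become
    # eligible for reassignment. With capacity=1000 and 40 triggers already assigned,
    # at most 960 additional triggers are claimed in this pass.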
@classmethod
def get_sorted_triggers(cls, capacity, alive_triggerer_ids, session):
return with_row_locks(
session.query(cls.id)
.filter(or_(cls.triggerer_id.is_(None), cls.triggerer_id.notin_(alive_triggerer_ids)))
.order_by(cls.created_date)
.limit(capacity),
session,
skip_locked=True,
).all()
| 10,333 | 40.336 | 110 |
py
|
airflow
|
airflow-main/airflow/models/renderedtifields.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Save Rendered Template Fields."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import sqlalchemy_jsonfield
from sqlalchemy import Column, ForeignKeyConstraint, Integer, PrimaryKeyConstraint, delete, select, text
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import Session, relationship
from airflow.configuration import conf
from airflow.models.base import Base, StringID
from airflow.models.taskinstance import TaskInstance
from airflow.serialization.helpers import serialize_template_field
from airflow.settings import json
from airflow.utils.retries import retry_db_transaction
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import tuple_not_in_condition
if TYPE_CHECKING:
from sqlalchemy.sql import FromClause
class RenderedTaskInstanceFields(Base):
"""Save Rendered Template Fields."""
__tablename__ = "rendered_task_instance_fields"
dag_id = Column(StringID(), primary_key=True)
task_id = Column(StringID(), primary_key=True)
run_id = Column(StringID(), primary_key=True)
map_index = Column(Integer, primary_key=True, server_default=text("-1"))
rendered_fields = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False)
k8s_pod_yaml = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
__table_args__ = (
PrimaryKeyConstraint(
"dag_id",
"task_id",
"run_id",
"map_index",
name="rendered_task_instance_fields_pkey",
mssql_clustered=True,
),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="rtif_ti_fkey",
ondelete="CASCADE",
),
)
task_instance = relationship(
"TaskInstance",
lazy="joined",
back_populates="rendered_task_instance_fields",
)
# We don't need a DB level FK here, as we already have that to TI (which has one to DR) but by defining
# the relationship we can more easily find the execution date for these rows
dag_run = relationship(
"DagRun",
primaryjoin="""and_(
RenderedTaskInstanceFields.dag_id == foreign(DagRun.dag_id),
RenderedTaskInstanceFields.run_id == foreign(DagRun.run_id),
)""",
viewonly=True,
)
execution_date = association_proxy("dag_run", "execution_date")
def __init__(self, ti: TaskInstance, render_templates=True):
self.dag_id = ti.dag_id
self.task_id = ti.task_id
self.run_id = ti.run_id
self.map_index = ti.map_index
self.ti = ti
if render_templates:
ti.render_templates()
self.task = ti.task
if os.environ.get("AIRFLOW_IS_K8S_EXECUTOR_POD", None):
self.k8s_pod_yaml = ti.render_k8s_pod_yaml()
self.rendered_fields = {
field: serialize_template_field(getattr(self.task, field)) for field in self.task.template_fields
}
self._redact()
def __repr__(self):
prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.task_id} {self.run_id}"
if self.map_index != -1:
prefix += f" map_index={self.map_index}"
return prefix + ">"
def _redact(self):
from airflow.utils.log.secrets_masker import redact
if self.k8s_pod_yaml:
self.k8s_pod_yaml = redact(self.k8s_pod_yaml)
for field, rendered in self.rendered_fields.items():
self.rendered_fields[field] = redact(rendered, field)
@classmethod
@provide_session
def get_templated_fields(cls, ti: TaskInstance, session: Session = NEW_SESSION) -> dict | None:
"""
Get templated field for a TaskInstance from the RenderedTaskInstanceFields
table.
:param ti: Task Instance
:param session: SqlAlchemy Session
:return: Rendered Templated TI field
"""
result = session.scalar(
select(cls).where(
cls.dag_id == ti.dag_id,
cls.task_id == ti.task_id,
cls.run_id == ti.run_id,
cls.map_index == ti.map_index,
)
)
if result:
rendered_fields = result.rendered_fields
return rendered_fields
else:
return None
@classmethod
@provide_session
def get_k8s_pod_yaml(cls, ti: TaskInstance, session: Session = NEW_SESSION) -> dict | None:
"""
Get rendered Kubernetes Pod Yaml for a TaskInstance from the RenderedTaskInstanceFields
table.
:param ti: Task Instance
:param session: SqlAlchemy Session
:return: Kubernetes Pod Yaml
"""
result = session.scalar(
select(cls).where(
cls.dag_id == ti.dag_id,
cls.task_id == ti.task_id,
cls.run_id == ti.run_id,
cls.map_index == ti.map_index,
)
)
return result.k8s_pod_yaml if result else None
@provide_session
@retry_db_transaction
def write(self, session: Session = None):
"""Write instance to database.
:param session: SqlAlchemy Session
"""
session.merge(self)
@classmethod
@provide_session
def delete_old_records(
cls,
task_id: str,
dag_id: str,
num_to_keep: int = conf.getint("core", "max_num_rendered_ti_fields_per_task", fallback=0),
session: Session = NEW_SESSION,
) -> None:
"""
        Keep only the last ``num_to_keep`` records for a task by deleting the others.
In the case of data for a mapped task either all of the rows or none of the rows will be deleted, so
we don't end up with partial data for a set of mapped Task Instances left in the database.
:param task_id: Task ID
:param dag_id: Dag ID
:param num_to_keep: Number of Records to keep
:param session: SqlAlchemy Session
"""
from airflow.models.dagrun import DagRun
if num_to_keep <= 0:
return
tis_to_keep_query = (
select(cls.dag_id, cls.task_id, cls.run_id, DagRun.execution_date)
.where(cls.dag_id == dag_id, cls.task_id == task_id)
.join(cls.dag_run)
.distinct()
.order_by(DagRun.execution_date.desc())
.limit(num_to_keep)
)
cls._do_delete_old_records(
dag_id=dag_id,
task_id=task_id,
ti_clause=tis_to_keep_query.subquery(),
session=session,
)
session.flush()
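    # --- Illustrative sketch (not part of the original class) ---
    # Hedged example of delete_old_records(): prune everything but the 30 most recent
    # rendered-field rows for one task; the dag_id/task_id are invented and an open
    # SQLAlchemy session is assumed to be available.
    @staticmethod
    def _example_prune(session: Session) -> None:
        RenderedTaskInstanceFields.delete_old_records(
            dag_id="example_dag", task_id="example_task", num_to_keep=30, session=session
        )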
@classmethod
@retry_db_transaction
def _do_delete_old_records(
cls,
*,
task_id: str,
dag_id: str,
ti_clause: FromClause,
session: Session,
) -> None:
# This query might deadlock occasionally and it should be retried if fails (see decorator)
stmt = (
delete(cls)
.where(
cls.dag_id == dag_id,
cls.task_id == task_id,
tuple_not_in_condition(
(cls.dag_id, cls.task_id, cls.run_id),
select(ti_clause.c.dag_id, ti_clause.c.task_id, ti_clause.c.run_id),
session=session,
),
)
.execution_options(synchronize_session=False)
)
session.execute(stmt)
| 8,477 | 33.048193 | 109 |
py
|