Add is_orphaned to DatasetModel
def upgrade(): """Add is_orphaned to DatasetModel""" with op.batch_alter_table("dataset") as batch_op: batch_op.add_column( sa.Column( "is_orphaned", sa.Boolean, default=False, nullable=False, server_default="0", ) )
Remove is_orphaned from DatasetModel
def downgrade(): """Remove is_orphaned from DatasetModel""" with op.batch_alter_table("dataset") as batch_op: batch_op.drop_column("is_orphaned", mssql_drop_default=True)
Apply add dttm index on log table
def upgrade(): """Apply add dttm index on log table""" op.create_index("idx_log_dttm", "log", ["dttm"], unique=False)
Unapply add dttm index on log table
def downgrade(): """Unapply add dttm index on log table""" op.drop_index("idx_log_dttm", table_name="log")
Increase length of user identifier columns in ab_user and ab_register_user tables
def upgrade(): """Increase length of user identifier columns in ab_user and ab_register_user tables""" with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(256), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(256), existing_nullable=False) batch_op.alter_column( "username", type_=sa.String(512).with_variant(sa.String(512, collation="NOCASE"), "sqlite"), existing_nullable=False, ) batch_op.alter_column("email", type_=sa.String(512), existing_nullable=False) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(256), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(256), existing_nullable=False) batch_op.alter_column( "username", type_=sa.String(512).with_variant(sa.String(512, collation="NOCASE"), "sqlite"), existing_nullable=False, ) batch_op.alter_column("email", type_=sa.String(512), existing_nullable=False)
Revert length of user identifier columns in ab_user and ab_register_user tables
def downgrade(): """Revert length of user identifier columns in ab_user and ab_register_user tables""" conn = op.get_bind() if conn.dialect.name != "mssql": with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column( "username", type_=sa.String(256).with_variant(sa.String(256, collation="NOCASE"), "sqlite"), existing_nullable=False, ) batch_op.alter_column("email", type_=sa.String(256), existing_nullable=False) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column( "username", type_=sa.String(256).with_variant(sa.String(256, collation="NOCASE"), "sqlite"), existing_nullable=False, ) batch_op.alter_column("email", type_=sa.String(256), existing_nullable=False) else: # MSSQL doesn't drop implicit unique constraints it created # We need to drop the two unique constraints explicitly with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(64), existing_nullable=False) # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", type_=sa.String(256), existing_nullable=False) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", type_=sa.String(256), existing_nullable=False) batch_op.create_unique_constraint(None, ["email"]) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column("first_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column("last_name", type_=sa.String(64), existing_nullable=False) batch_op.alter_column("email", type_=sa.String(256), existing_nullable=False) # Drop the unique constraint on username constraints = get_mssql_table_constraints(conn, "ab_register_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", type_=sa.String(256), existing_nullable=False) batch_op.create_unique_constraint(None, ["username"])
Apply Add onupdate cascade to taskmap
def upgrade(): """Apply Add onupdate cascade to taskmap""" with op.batch_alter_table("task_map") as batch_op: batch_op.drop_constraint("task_map_task_instance_fkey", type_="foreignkey") batch_op.create_foreign_key( "task_map_task_instance_fkey", "task_instance", ["dag_id", "task_id", "run_id", "map_index"], ["dag_id", "task_id", "run_id", "map_index"], ondelete="CASCADE", onupdate="CASCADE", )
Unapply Add onupdate cascade to taskmap
def downgrade(): """Unapply Add onupdate cascade to taskmap""" with op.batch_alter_table("task_map") as batch_op: batch_op.drop_constraint("task_map_task_instance_fkey", type_="foreignkey") batch_op.create_foreign_key( "task_map_task_instance_fkey", "task_instance", ["dag_id", "task_id", "run_id", "map_index"], ["dag_id", "task_id", "run_id", "map_index"], ondelete="CASCADE", )
Apply Add index to task_instance table
def upgrade(): """Apply Add index to task_instance table""" # We don't add this index anymore because it's not useful. pass
Unapply Add index to task_instance table
def downgrade(): """Unapply Add index to task_instance table""" # At 2.8.1 we removed this index as it is not used, and changed this migration not to add it # So we use drop if exists (cus it might not be there) import sqlalchemy from contextlib import suppress with suppress(sqlalchemy.exc.DatabaseError): # mysql does not support drop if exists index op.drop_index("ti_state_incl_start_date", table_name="task_instance", if_exists=True)
Apply Add custom_operator_name column
def upgrade(): """Apply Add custom_operator_name column""" with op.batch_alter_table(TABLE_NAME) as batch_op: batch_op.add_column(sa.Column("custom_operator_name", sa.VARCHAR(length=1000), nullable=True))
Unapply Add custom_operator_name column
def downgrade(): """Unapply Add custom_operator_name column""" with op.batch_alter_table(TABLE_NAME) as batch_op: batch_op.drop_column("custom_operator_name")
Apply add include_deferred column to pool
def upgrade(): """Apply add include_deferred column to pool""" with op.batch_alter_table("slot_pool") as batch_op: batch_op.add_column(sa.Column("include_deferred", sa.Boolean)) # Different databases support different literal for FALSE. This is fine. op.execute(sa.text(f"UPDATE slot_pool SET include_deferred = {sa.false().compile(op.get_bind())}")) with op.batch_alter_table("slot_pool") as batch_op: batch_op.alter_column("include_deferred", existing_type=sa.Boolean, nullable=False)
Unapply add include_deferred column to pool
def downgrade(): """Unapply add include_deferred column to pool""" with op.batch_alter_table("slot_pool") as batch_op: batch_op.drop_column("include_deferred")
Apply add cleared column to dagrun
def upgrade(): """Apply add cleared column to dagrun""" conn = op.get_bind() if conn.dialect.name == "mssql": with op.batch_alter_table("dag_run") as batch_op: batch_op.add_column(sa.Column("clear_number", sa.Integer, default=0)) batch_op.alter_column("clear_number", existing_type=sa.Integer, nullable=False) else: with op.batch_alter_table("dag_run") as batch_op: batch_op.add_column( sa.Column( "clear_number", sa.Integer, default=0, nullable=False, server_default="0", ) )
Unapply add cleared column to dagrun
def downgrade(): """Unapply add cleared column to pool""" with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_column("clear_number")
Adds owner_display_name column to log
def upgrade(): """Adds owner_display_name column to log""" with op.batch_alter_table(TABLE_NAME) as batch_op: batch_op.add_column(sa.Column("owner_display_name", sa.String(500)))
Removes owner_display_name column from log
def downgrade(): """Removes owner_display_name column from log""" with op.batch_alter_table(TABLE_NAME) as batch_op: batch_op.drop_column("owner_display_name")
Apply Make connection login/password TEXT
def upgrade(): """Apply Make connection login/password TEXT""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "login", existing_type=sa.VARCHAR(length=500), type_=sa.Text(), existing_nullable=True ) batch_op.alter_column( "password", existing_type=sa.VARCHAR(length=5000), type_=sa.Text(), existing_nullable=True )
Unapply Make connection login/password TEXT
def downgrade(): """Unapply Make connection login/password TEXT""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "password", existing_type=sa.Text(), type_=sa.VARCHAR(length=5000), existing_nullable=True ) batch_op.alter_column( "login", existing_type=sa.Text(), type_=sa.VARCHAR(length=500), existing_nullable=True )
Apply Add processor_subdir to ImportError.
def upgrade(): """Apply Add processor_subdir to ImportError.""" conn = op.get_bind() with op.batch_alter_table("import_error") as batch_op: if conn.dialect.name == "mysql": batch_op.add_column(sa.Column("processor_subdir", sa.Text(length=2000), nullable=True)) else: batch_op.add_column(sa.Column("processor_subdir", sa.String(length=2000), nullable=True))
Unapply Add processor_subdir to ImportError.
def downgrade(): """Unapply Add processor_subdir to ImportError.""" conn = op.get_bind() with op.batch_alter_table("import_error", schema=None) as batch_op: batch_op.drop_column("processor_subdir")
Apply refactor dag run indexes
def upgrade(): """Apply refactor dag run indexes""" # This index may have been created in 2.7 but we've since removed it from migrations import sqlalchemy from contextlib import suppress with suppress(sqlalchemy.exc.DatabaseError): # mysql does not support drop if exists index op.drop_index("ti_state_incl_start_date", table_name="task_instance", if_exists=True)
Unapply refactor dag run indexes
def downgrade(): """Unapply refactor dag run indexes"""
Apply Add rendered_map_index to TaskInstance.
def upgrade(): """Apply Add rendered_map_index to TaskInstance.""" conn = op.get_bind() with op.batch_alter_table("task_instance") as batch_op: batch_op.add_column(sa.Column("rendered_map_index", sa.String(length=250), nullable=True))
Unapply Add rendered_map_index to TaskInstance.
def downgrade(): """Unapply Add rendered_map_index to TaskInstance.""" conn = op.get_bind() with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.drop_column("rendered_map_index")
Apply Add run_id to Log and increase Log event name length.
def upgrade(): """Apply Add run_id to Log and increase Log event name length.""" # Note: we could repopulate the run_id of old runs via a join with DagRun on date + dag_id, # But this would incur a potentially heavy migration for non-essential changes. # Instead, we've chosen to only populate this column from 2.9.0 onwards. with op.batch_alter_table("log") as batch_op: batch_op.add_column(sa.Column("run_id", StringID(), nullable=True)) batch_op.alter_column("event", type_=sa.String(60))
Unapply Add run_id to Log and increase Log event name length.
def downgrade(): """Unapply Add run_id to Log and increase Log event name length.""" with op.batch_alter_table("log") as batch_op: batch_op.drop_column("run_id") conn = op.get_bind() if conn.dialect.name == "mssql": with op.batch_alter_table("log") as batch_op: batch_op.drop_index("idx_log_event") batch_op.alter_column("event", type_=sa.String(30), nullable=False) batch_op.create_index("idx_log_event", ["event"]) else: with op.batch_alter_table("log") as batch_op: batch_op.alter_column("event", type_=sa.String(30), nullable=False)
Apply Add dataset_expression to DagModel.
def upgrade(): """Apply Add dataset_expression to DagModel.""" with op.batch_alter_table("dag") as batch_op: batch_op.add_column(sa.Column('dataset_expression', sqlalchemy_jsonfield.JSONField(json=json), nullable=True))
Unapply Add dataset_expression to DagModel.
def downgrade(): """Unapply Add dataset_expression to DagModel.""" with op.batch_alter_table("dag") as batch_op: batch_op.drop_column('dataset_expression')
Apply Adding max_consecutive_failed_dag_runs column to dag_model table
def upgrade(): """Apply Adding max_consecutive_failed_dag_runs column to dag_model table""" # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('max_consecutive_failed_dag_runs', sa.Integer()))
Unapply Adding max_consecutive_failed_dag_runs column to dag_model table
def downgrade(): """Unapply Adding max_consecutive_failed_dag_runs column to dag_model table""" # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.drop_column('max_consecutive_failed_dag_runs')
Apply Change value column type to longblob in xcom table for mysql
def upgrade(): """Apply Change value column type to longblob in xcom table for mysql""" conn = op.get_bind() if conn.dialect.name == "mysql": with op.batch_alter_table("xcom", schema=None) as batch_op: batch_op.alter_column("value", type_=sa.LargeBinary().with_variant(LONGBLOB, "mysql"))
Unapply Change value column type to longblob in xcom table for mysql
def downgrade(): """Unapply Change value column type to longblob in xcom table for mysql""" conn = op.get_bind() if conn.dialect.name == "mysql": with op.batch_alter_table("xcom", schema=None) as batch_op: batch_op.alter_column("value", type_=sa.LargeBinary)
Apply add display name for dag and task instance
def upgrade(): """Apply add display name for dag and task instance""" op.add_column("dag", sa.Column("dag_display_name", sa.String(2000), nullable=True)) op.add_column("task_instance", sa.Column("task_display_name", sa.String(2000), nullable=True))
Unapply add display name for dag and task instance
def downgrade(): """Unapply add display name for dag and task instance""" op.drop_column("dag", "dag_display_name") op.drop_column("task_instance", "task_display_name")
Update trigger kwargs type to string and encrypt
def upgrade(): """Update trigger kwargs type to string and encrypt""" with op.batch_alter_table("trigger") as batch_op: batch_op.alter_column("kwargs", type_=sa.Text(), ) if not context.is_offline_mode(): session = get_session() try: for trigger in session.query(Trigger).options(lazyload(Trigger.task_instance)): trigger.kwargs = trigger.kwargs session.commit() finally: session.close()
Unapply update trigger kwargs type to string and encrypt
def downgrade(): """Unapply update trigger kwargs type to string and encrypt""" if context.is_offline_mode(): print(dedent(""" ------------ -- WARNING: Unable to decrypt trigger kwargs automatically in offline mode! -- If any trigger rows exist when you do an offline downgrade, the migration will fail. ------------ """)) else: session = get_session() try: for trigger in session.query(Trigger).options(lazyload(Trigger.task_instance)): trigger.encrypted_kwargs = json.dumps(BaseSerialization.serialize(trigger.kwargs)) session.commit() finally: session.close() with op.batch_alter_table("trigger") as batch_op: batch_op.alter_column("kwargs", type_=ExtendedJSON(), postgresql_using='kwargs::json')
Apply add executor field to task instance
def upgrade(): """Apply add executor field to task instance""" op.add_column('task_instance', sa.Column('executor', sa.String(length=1000), default=None))
Unapply add executor field to task instance
def downgrade(): """Unapply add executor field to task instance""" op.drop_column('task_instance', 'executor')
Get SQLAlchemy args to use for COLLATION.
def get_id_collation_args():
    """Get SQLAlchemy args to use for COLLATION."""
    collation = conf.get("database", "sql_engine_collation_for_ids", fallback=None)
    if collation:
        return {"collation": collation}
    else:
        # Automatically use utf8mb3_bin collation for mysql.
        # This is backwards-compatible. All our IDs are ASCII anyway, so even if we migrate from a
        # previously installed database with a different collation and end up with a mixture of
        # collations, it's not a problem whatsoever (and we keep it small enough that our indexes
        # for MySQL will not exceed the maximum index size).
        #
        # See https://github.com/apache/airflow/pull/17603#issuecomment-901121618.
        #
        # We cannot use session/dialect as at this point we are trying to determine the right
        # connection parameters, so we inspect the configured connection string instead.
        conn = conf.get("database", "sql_alchemy_conn", fallback="")
        if conn.startswith(("mysql", "mariadb")):
            return {"collation": "utf8mb3_bin"}
        return {}
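A usage sketch of how the returned kwargs are typically splatted into a String column type so identifier columns share one collation. This is illustrative only — it assumes the function above is importable and Airflow's configuration is initialized; the helper and column names below are made up.

import sqlalchemy as sa

COLLATION_ARGS = get_id_collation_args()


def id_column_type(length: int = 250) -> sa.String:
    """Column type for dag_id/task_id-style identifier columns (hypothetical helper)."""
    return sa.String(length=length, **COLLATION_ARGS)


dag_id_col = sa.Column("dag_id", id_column_type(), primary_key=True)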
Given a number of tasks, builds a dependency chain. This function accepts values of BaseOperator (aka tasks), EdgeModifiers (aka Labels), XComArg, TaskGroups, or lists containing any mix of these types (or a mix in the same list). If you want to chain between two lists you must ensure they have the same length. Using classic operators/sensors: .. code-block:: python chain(t1, [t2, t3], [t4, t5], t6) is equivalent to:: / -> t2 -> t4 \ t1 -> t6 \ -> t3 -> t5 / .. code-block:: python t1.set_downstream(t2) t1.set_downstream(t3) t2.set_downstream(t4) t3.set_downstream(t5) t4.set_downstream(t6) t5.set_downstream(t6) Using task-decorated functions aka XComArgs: .. code-block:: python chain(x1(), [x2(), x3()], [x4(), x5()], x6()) is equivalent to:: / -> x2 -> x4 \ x1 -> x6 \ -> x3 -> x5 / .. code-block:: python x1 = x1() x2 = x2() x3 = x3() x4 = x4() x5 = x5() x6 = x6() x1.set_downstream(x2) x1.set_downstream(x3) x2.set_downstream(x4) x3.set_downstream(x5) x4.set_downstream(x6) x5.set_downstream(x6) Using TaskGroups: .. code-block:: python chain(t1, task_group1, task_group2, t2) t1.set_downstream(task_group1) task_group1.set_downstream(task_group2) task_group2.set_downstream(t2) It is also possible to mix between classic operator/sensor, EdgeModifiers, XComArg, and TaskGroups: .. code-block:: python chain(t1, [Label("branch one"), Label("branch two")], [x1(), x2()], task_group1, x3()) is equivalent to:: / "branch one" -> x1 \ t1 -> task_group1 -> x3 \ "branch two" -> x2 / .. code-block:: python x1 = x1() x2 = x2() x3 = x3() label1 = Label("branch one") label2 = Label("branch two") t1.set_downstream(label1) label1.set_downstream(x1) t2.set_downstream(label2) label2.set_downstream(x2) x1.set_downstream(task_group1) x2.set_downstream(task_group1) task_group1.set_downstream(x3) # or x1 = x1() x2 = x2() x3 = x3() t1.set_downstream(x1, edge_modifier=Label("branch one")) t1.set_downstream(x2, edge_modifier=Label("branch two")) x1.set_downstream(task_group1) x2.set_downstream(task_group1) task_group1.set_downstream(x3) :param tasks: Individual and/or list of tasks, EdgeModifiers, XComArgs, or TaskGroups to set dependencies
def chain(*tasks: DependencyMixin | Sequence[DependencyMixin]) -> None: r""" Given a number of tasks, builds a dependency chain. This function accepts values of BaseOperator (aka tasks), EdgeModifiers (aka Labels), XComArg, TaskGroups, or lists containing any mix of these types (or a mix in the same list). If you want to chain between two lists you must ensure they have the same length. Using classic operators/sensors: .. code-block:: python chain(t1, [t2, t3], [t4, t5], t6) is equivalent to:: / -> t2 -> t4 \ t1 -> t6 \ -> t3 -> t5 / .. code-block:: python t1.set_downstream(t2) t1.set_downstream(t3) t2.set_downstream(t4) t3.set_downstream(t5) t4.set_downstream(t6) t5.set_downstream(t6) Using task-decorated functions aka XComArgs: .. code-block:: python chain(x1(), [x2(), x3()], [x4(), x5()], x6()) is equivalent to:: / -> x2 -> x4 \ x1 -> x6 \ -> x3 -> x5 / .. code-block:: python x1 = x1() x2 = x2() x3 = x3() x4 = x4() x5 = x5() x6 = x6() x1.set_downstream(x2) x1.set_downstream(x3) x2.set_downstream(x4) x3.set_downstream(x5) x4.set_downstream(x6) x5.set_downstream(x6) Using TaskGroups: .. code-block:: python chain(t1, task_group1, task_group2, t2) t1.set_downstream(task_group1) task_group1.set_downstream(task_group2) task_group2.set_downstream(t2) It is also possible to mix between classic operator/sensor, EdgeModifiers, XComArg, and TaskGroups: .. code-block:: python chain(t1, [Label("branch one"), Label("branch two")], [x1(), x2()], task_group1, x3()) is equivalent to:: / "branch one" -> x1 \ t1 -> task_group1 -> x3 \ "branch two" -> x2 / .. code-block:: python x1 = x1() x2 = x2() x3 = x3() label1 = Label("branch one") label2 = Label("branch two") t1.set_downstream(label1) label1.set_downstream(x1) t2.set_downstream(label2) label2.set_downstream(x2) x1.set_downstream(task_group1) x2.set_downstream(task_group1) task_group1.set_downstream(x3) # or x1 = x1() x2 = x2() x3 = x3() t1.set_downstream(x1, edge_modifier=Label("branch one")) t1.set_downstream(x2, edge_modifier=Label("branch two")) x1.set_downstream(task_group1) x2.set_downstream(task_group1) task_group1.set_downstream(x3) :param tasks: Individual and/or list of tasks, EdgeModifiers, XComArgs, or TaskGroups to set dependencies """ for up_task, down_task in zip(tasks, tasks[1:]): if isinstance(up_task, DependencyMixin): up_task.set_downstream(down_task) continue if isinstance(down_task, DependencyMixin): down_task.set_upstream(up_task) continue if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence): raise TypeError(f"Chain not supported between instances of {type(up_task)} and {type(down_task)}") up_task_list = up_task down_task_list = down_task if len(up_task_list) != len(down_task_list): raise AirflowException( f"Chain not supported for different length Iterable. " f"Got {len(up_task_list)} and {len(down_task_list)}." ) for up_t, down_t in zip(up_task_list, down_task_list): up_t.set_downstream(down_t)
Set downstream dependencies for all tasks in from_tasks to all tasks in to_tasks. Using classic operators/sensors: .. code-block:: python cross_downstream(from_tasks=[t1, t2, t3], to_tasks=[t4, t5, t6]) is equivalent to:: t1 ---> t4 \ / t2 -X -> t5 / \ t3 ---> t6 .. code-block:: python t1.set_downstream(t4) t1.set_downstream(t5) t1.set_downstream(t6) t2.set_downstream(t4) t2.set_downstream(t5) t2.set_downstream(t6) t3.set_downstream(t4) t3.set_downstream(t5) t3.set_downstream(t6) Using task-decorated functions aka XComArgs: .. code-block:: python cross_downstream(from_tasks=[x1(), x2(), x3()], to_tasks=[x4(), x5(), x6()]) is equivalent to:: x1 ---> x4 \ / x2 -X -> x5 / \ x3 ---> x6 .. code-block:: python x1 = x1() x2 = x2() x3 = x3() x4 = x4() x5 = x5() x6 = x6() x1.set_downstream(x4) x1.set_downstream(x5) x1.set_downstream(x6) x2.set_downstream(x4) x2.set_downstream(x5) x2.set_downstream(x6) x3.set_downstream(x4) x3.set_downstream(x5) x3.set_downstream(x6) It is also possible to mix between classic operator/sensor and XComArg tasks: .. code-block:: python cross_downstream(from_tasks=[t1, x2(), t3], to_tasks=[x1(), t2, x3()]) is equivalent to:: t1 ---> x1 \ / x2 -X -> t2 / \ t3 ---> x3 .. code-block:: python x1 = x1() x2 = x2() x3 = x3() t1.set_downstream(x1) t1.set_downstream(t2) t1.set_downstream(x3) x2.set_downstream(x1) x2.set_downstream(t2) x2.set_downstream(x3) t3.set_downstream(x1) t3.set_downstream(t2) t3.set_downstream(x3) :param from_tasks: List of tasks or XComArgs to start from. :param to_tasks: List of tasks or XComArgs to set as downstream dependencies.
def cross_downstream( from_tasks: Sequence[DependencyMixin], to_tasks: DependencyMixin | Sequence[DependencyMixin], ): r""" Set downstream dependencies for all tasks in from_tasks to all tasks in to_tasks. Using classic operators/sensors: .. code-block:: python cross_downstream(from_tasks=[t1, t2, t3], to_tasks=[t4, t5, t6]) is equivalent to:: t1 ---> t4 \ / t2 -X -> t5 / \ t3 ---> t6 .. code-block:: python t1.set_downstream(t4) t1.set_downstream(t5) t1.set_downstream(t6) t2.set_downstream(t4) t2.set_downstream(t5) t2.set_downstream(t6) t3.set_downstream(t4) t3.set_downstream(t5) t3.set_downstream(t6) Using task-decorated functions aka XComArgs: .. code-block:: python cross_downstream(from_tasks=[x1(), x2(), x3()], to_tasks=[x4(), x5(), x6()]) is equivalent to:: x1 ---> x4 \ / x2 -X -> x5 / \ x3 ---> x6 .. code-block:: python x1 = x1() x2 = x2() x3 = x3() x4 = x4() x5 = x5() x6 = x6() x1.set_downstream(x4) x1.set_downstream(x5) x1.set_downstream(x6) x2.set_downstream(x4) x2.set_downstream(x5) x2.set_downstream(x6) x3.set_downstream(x4) x3.set_downstream(x5) x3.set_downstream(x6) It is also possible to mix between classic operator/sensor and XComArg tasks: .. code-block:: python cross_downstream(from_tasks=[t1, x2(), t3], to_tasks=[x1(), t2, x3()]) is equivalent to:: t1 ---> x1 \ / x2 -X -> t2 / \ t3 ---> x3 .. code-block:: python x1 = x1() x2 = x2() x3 = x3() t1.set_downstream(x1) t1.set_downstream(t2) t1.set_downstream(x3) x2.set_downstream(x1) x2.set_downstream(t2) x2.set_downstream(x3) t3.set_downstream(x1) t3.set_downstream(t2) t3.set_downstream(x3) :param from_tasks: List of tasks or XComArgs to start from. :param to_tasks: List of tasks or XComArgs to set as downstream dependencies. """ for task in from_tasks: task.set_downstream(to_tasks)
Simplify task dependency definition.

E.g.: suppose you want precedence like so::

        ╭─op2─╮ ╭─op4─╮
    op1─┤     ├─├─op5─┤─op7
        ╰-op3─╯ ╰-op6─╯

Then you can accomplish like so::

    chain_linear(op1, [op2, op3], [op4, op5, op6], op7)

:param elements: a list of operators / lists of operators
def chain_linear(*elements: DependencyMixin | Sequence[DependencyMixin]):
    """
    Simplify task dependency definition.

    E.g.: suppose you want precedence like so::

            ╭─op2─╮ ╭─op4─╮
        op1─┤     ├─├─op5─┤─op7
            ╰-op3─╯ ╰-op6─╯

    Then you can accomplish like so::

        chain_linear(op1, [op2, op3], [op4, op5, op6], op7)

    :param elements: a list of operators / lists of operators
    """
    if not elements:
        raise ValueError("No tasks provided; nothing to do.")
    prev_elem = None
    deps_set = False
    for curr_elem in elements:
        if isinstance(curr_elem, EdgeModifier):
            raise ValueError("Labels are not supported by chain_linear")
        if prev_elem is not None:
            for task in prev_elem:
                task >> curr_elem
                if not deps_set:
                    deps_set = True
        prev_elem = [curr_elem] if isinstance(curr_elem, DependencyMixin) else curr_elem
    if not deps_set:
        raise ValueError("No dependencies were set. Did you forget to expand with `*`?")
PEP-562: Lazy loaded attributes on python modules. :meta private:
def __getattr__(name):
    """
    PEP-562: Lazy loaded attributes on python modules.

    :meta private:
    """
    path = __deprecated_imports.get(name)
    if not path:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    from airflow.utils.module_loading import import_string

    warnings.warn(
        f"Import `{__name__}.{name}` is deprecated. Please use `{path}.{name}`.",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )
    val = import_string(f"{path}.{name}")

    # Store for next time
    globals()[name] = val
    return val
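The module-level ``__getattr__`` above relies on PEP 562, which lets a module intercept lookups for names it does not define. A stripped-down, self-contained illustration of the same mechanism follows; the module and attribute names in the mapping are made up for the sketch.

# lazy_mod.py — PEP 562: module-level __getattr__ runs only when a name isn't found normally.
import importlib
import warnings

_DEPRECATED_ALIASES = {
    # old name -> module that now provides it (illustrative mapping, not a real package)
    "OldHelper": "new_package.helpers",
}


def __getattr__(name):
    target_module = _DEPRECATED_ALIASES.get(name)
    if target_module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    warnings.warn(f"{name} moved to {target_module}", DeprecationWarning, stacklevel=2)
    value = getattr(importlib.import_module(target_module), name)
    globals()[name] = value  # cache so the import and lookup only happen once
    return value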
Do not use, this method is deprecated.
def parse_netloc_to_hostname(*args, **kwargs):
    """Do not use, this method is deprecated."""
    warnings.warn("This method is deprecated.", RemovedInAirflow3Warning, stacklevel=2)
    return _parse_netloc_to_hostname(*args, **kwargs)
Sanitize the connection id, allowing only a specific set of characters.

Namely, it allows alphanumeric characters plus the symbols #,!,-,_,.,:,\,/ and (), matching from 1 up to 250 consecutive characters. If desired, the max length can be adjusted by setting `max_length`. You can experiment with the regex here: https://regex101.com/r/69033B/1

The character selection prevents the injection of javascript or executable bits, avoiding any awkward behaviour in the front-end.

:param conn_id: The connection id to sanitize.
:param max_length: The max length of the connection ID, by default it is 250.
:return: the sanitized string, `None` otherwise.
def sanitize_conn_id(conn_id: str | None, max_length=CONN_ID_MAX_LEN) -> str | None:
    r"""Sanitize the connection id, allowing only a specific set of characters.

    Namely, it allows alphanumeric characters plus the symbols #,!,-,_,.,:,\,/ and (), matching
    from 1 up to 250 consecutive characters. If desired, the max length can be adjusted by
    setting `max_length`. You can experiment with the regex here:
    https://regex101.com/r/69033B/1

    The character selection prevents the injection of javascript or executable bits, avoiding
    any awkward behaviour in the front-end.

    :param conn_id: The connection id to sanitize.
    :param max_length: The max length of the connection ID, by default it is 250.
    :return: the sanitized string, `None` otherwise.
    """
    # check if `conn_id` or our match group is `None` and the `conn_id` is within the specified length.
    if (not isinstance(conn_id, str) or len(conn_id) > max_length) or (
        res := re2.match(RE_SANITIZE_CONN_ID, conn_id)
    ) is None:
        return None

    # if we reach here, then we matched something, return the first match
    return res.group(0)
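A small usage sketch, assuming the function above and its module-level regex are importable; the connection ids are illustrative.

assert sanitize_conn_id("my_postgres_conn") == "my_postgres_conn"        # allowed characters pass through
assert sanitize_conn_id("team-a/prod.db:5432") == "team-a/prod.db:5432"  # -, /, ., : are in the allowed set
assert sanitize_conn_id("<script>alert(1)") is None                      # leading disallowed character fails the match
assert sanitize_conn_id("x" * 300) is None                               # exceeds the 250-character default
assert sanitize_conn_id(None) is None                                    # non-strings are rejected outright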
Parse a URI string to get the correct hostname.

``urlparse(...).hostname`` or ``urlsplit(...).hostname`` returns the value in lowercase in most cases; some exclusions exist for specific cases such as https://bugs.python.org/issue32323. If a path is expected as part of the hostname, the default ``urlparse``/``urlsplit`` behavior is not what we want.
def _parse_netloc_to_hostname(uri_parts):
    """
    Parse a URI string to get the correct hostname.

    ``urlparse(...).hostname`` or ``urlsplit(...).hostname`` returns the value in lowercase in
    most cases; some exclusions exist for specific cases such as https://bugs.python.org/issue32323.
    If a path is expected as part of the hostname, the default ``urlparse``/``urlsplit`` behavior
    is not what we want.
    """
    hostname = unquote(uri_parts.hostname or "")
    if "/" in hostname:
        hostname = uri_parts.netloc
        if "@" in hostname:
            hostname = hostname.rsplit("@", 1)[1]
        if ":" in hostname:
            hostname = hostname.split(":", 1)[0]
        hostname = unquote(hostname)
    return hostname
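A small usage sketch, assuming the function above is importable; the URIs are illustrative.

from urllib.parse import urlsplit

# Ordinary host: urlsplit already lowercases it, and the function returns it unchanged.
assert _parse_netloc_to_hostname(urlsplit("postgresql://user:pw@db.example.com:5432/mydb")) == "db.example.com"

# Percent-encoded "host" containing slashes (e.g. a unix socket path): the unquoted value
# contains "/", so the function re-derives the host from the raw netloc and unquotes it,
# recovering the path instead of the mangled form.
assert _parse_netloc_to_hostname(urlsplit("postgresql://user:pw@%2Ftmp%2Fsocket:5432/mydb")) == "/tmp/socket"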
Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid. :return: Fernet object :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
def get_fernet():
    """
    Deferred load of Fernet key.

    This function could fail either because Cryptography is not installed
    or because the Fernet key is invalid.

    :return: Fernet object
    :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
    """
    from cryptography.fernet import Fernet, MultiFernet

    global _fernet

    if _fernet:
        return _fernet

    try:
        fernet_key = conf.get("core", "FERNET_KEY")
        if not fernet_key:
            log.warning("empty cryptography key - values will not be stored encrypted.")
            _fernet = NullFernet()
        else:
            _fernet = MultiFernet(
                [Fernet(fernet_part.encode("utf-8")) for fernet_part in fernet_key.split(",")]
            )
            _fernet.is_encrypted = True
    except (ValueError, TypeError) as value_error:
        raise AirflowException(f"Could not create Fernet object: {value_error}")
    return _fernet
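The comma-separated FERNET_KEY works because ``MultiFernet`` encrypts with the first key but tries every key on decryption, which is what enables key rotation. A self-contained sketch using the ``cryptography`` package directly; the keys and plaintext are generated on the fly purely for illustration.

from cryptography.fernet import Fernet, MultiFernet

old_key = Fernet.generate_key()
new_key = Fernet.generate_key()

# Value encrypted while only the old key was configured.
ciphertext = Fernet(old_key).encrypt(b"my-connection-password")

# Equivalent of FERNET_KEY="<new_key>,<old_key>": new key first, old key kept for decryption.
rotated = MultiFernet([Fernet(new_key), Fernet(old_key)])
assert rotated.decrypt(ciphertext) == b"my-connection-password"

# rotate() re-encrypts the token with the first (new) key so the old key can eventually be dropped.
reencrypted = rotated.rotate(ciphertext)
assert Fernet(new_key).decrypt(reencrypted) == b"my-connection-password"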
Create a Timetable instance from a ``schedule_interval`` argument.
def create_timetable(interval: ScheduleIntervalArg, timezone: Timezone | FixedTimezone) -> Timetable:
    """Create a Timetable instance from a ``schedule_interval`` argument."""
    if interval is NOTSET:
        return DeltaDataIntervalTimetable(DEFAULT_SCHEDULE_INTERVAL)
    if interval is None:
        return NullTimetable()
    if interval == "@once":
        return OnceTimetable()
    if interval == "@continuous":
        return ContinuousTimetable()
    if isinstance(interval, (timedelta, relativedelta)):
        return DeltaDataIntervalTimetable(interval)
    if isinstance(interval, str):
        if airflow_conf.getboolean("scheduler", "create_cron_data_intervals"):
            return CronDataIntervalTimetable(interval, timezone)
        else:
            return CronTriggerTimetable(interval, timezone=timezone)
    raise ValueError(f"{interval!r} is not a valid schedule_interval.")
Return the last dag run for a dag, None if there was none. Last dag run can be any type of run e.g. scheduled or backfilled. Overridden DagRuns are ignored.
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
    """
    Return the last dag run for a dag, None if there was none.

    Last dag run can be any type of run e.g. scheduled or backfilled.
    Overridden DagRuns are ignored.
    """
    DR = DagRun
    query = select(DR).where(DR.dag_id == dag_id)
    if not include_externally_triggered:
        query = query.where(DR.external_trigger == expression.false())
    query = query.order_by(DR.execution_date.desc())
    return session.scalar(query.limit(1))
Get next run info for a list of dag_ids. Given a list of dag_ids, get string representing how close any that are dataset triggered are their next run, e.g. "1 of 2 datasets updated".
def get_dataset_triggered_next_run_info( dag_ids: list[str], *, session: Session ) -> dict[str, dict[str, int | str]]: """ Get next run info for a list of dag_ids. Given a list of dag_ids, get string representing how close any that are dataset triggered are their next run, e.g. "1 of 2 datasets updated". """ from airflow.models.dataset import DagScheduleDatasetReference, DatasetDagRunQueue as DDRQ, DatasetModel return { x.dag_id: { "uri": x.uri, "ready": x.ready, "total": x.total, } for x in session.execute( select( DagScheduleDatasetReference.dag_id, # This is a dirty hack to workaround group by requiring an aggregate, # since grouping by dataset is not what we want to do here...but it works case((func.count() == 1, func.max(DatasetModel.uri)), else_="").label("uri"), func.count().label("total"), func.sum(case((DDRQ.target_dag_id.is_not(None), 1), else_=0)).label("ready"), ) .join( DDRQ, and_( DDRQ.dataset_id == DagScheduleDatasetReference.dataset_id, DDRQ.target_dag_id == DagScheduleDatasetReference.dag_id, ), isouter=True, ) .join(DatasetModel, DatasetModel.id == DagScheduleDatasetReference.dataset_id) .group_by(DagScheduleDatasetReference.dag_id) .where(DagScheduleDatasetReference.dag_id.in_(dag_ids)) ).all() }
Python dag decorator which wraps a function into an Airflow DAG. Accepts kwargs for operator kwarg. Can be used to parameterize DAGs. :param dag_args: Arguments for DAG object :param dag_kwargs: Kwargs for DAG object.
def dag( dag_id: str = "", description: str | None = None, schedule: ScheduleArg = NOTSET, schedule_interval: ScheduleIntervalArg = NOTSET, timetable: Timetable | None = None, start_date: datetime | None = None, end_date: datetime | None = None, full_filepath: str | None = None, template_searchpath: str | Iterable[str] | None = None, template_undefined: type[jinja2.StrictUndefined] = jinja2.StrictUndefined, user_defined_macros: dict | None = None, user_defined_filters: dict | None = None, default_args: dict | None = None, concurrency: int | None = None, max_active_tasks: int = airflow_conf.getint("core", "max_active_tasks_per_dag"), max_active_runs: int = airflow_conf.getint("core", "max_active_runs_per_dag"), max_consecutive_failed_dag_runs: int = airflow_conf.getint( "core", "max_consecutive_failed_dag_runs_per_dag" ), dagrun_timeout: timedelta | None = None, sla_miss_callback: None | SLAMissCallback | list[SLAMissCallback] = None, default_view: str = airflow_conf.get_mandatory_value("webserver", "dag_default_view").lower(), orientation: str = airflow_conf.get_mandatory_value("webserver", "dag_orientation"), catchup: bool = airflow_conf.getboolean("scheduler", "catchup_by_default"), on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None, on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None, doc_md: str | None = None, params: abc.MutableMapping | None = None, access_control: dict | None = None, is_paused_upon_creation: bool | None = None, jinja_environment_kwargs: dict | None = None, render_template_as_native_obj: bool = False, tags: list[str] | None = None, owner_links: dict[str, str] | None = None, auto_register: bool = True, fail_stop: bool = False, dag_display_name: str | None = None, ) -> Callable[[Callable], Callable[..., DAG]]: """ Python dag decorator which wraps a function into an Airflow DAG. Accepts kwargs for operator kwarg. Can be used to parameterize DAGs. :param dag_args: Arguments for DAG object :param dag_kwargs: Kwargs for DAG object. """ def wrapper(f: Callable) -> Callable[..., DAG]: @functools.wraps(f) def factory(*args, **kwargs): # Generate signature for decorated function and bind the arguments when called # we do this to extract parameters, so we can annotate them on the DAG object. # In addition, this fails if we are missing any args/kwargs with TypeError as expected. f_sig = signature(f).bind(*args, **kwargs) # Apply defaults to capture default values if set. 
f_sig.apply_defaults() # Initialize DAG with bound arguments with DAG( dag_id or f.__name__, description=description, schedule_interval=schedule_interval, timetable=timetable, start_date=start_date, end_date=end_date, full_filepath=full_filepath, template_searchpath=template_searchpath, template_undefined=template_undefined, user_defined_macros=user_defined_macros, user_defined_filters=user_defined_filters, default_args=default_args, concurrency=concurrency, max_active_tasks=max_active_tasks, max_active_runs=max_active_runs, max_consecutive_failed_dag_runs=max_consecutive_failed_dag_runs, dagrun_timeout=dagrun_timeout, sla_miss_callback=sla_miss_callback, default_view=default_view, orientation=orientation, catchup=catchup, on_success_callback=on_success_callback, on_failure_callback=on_failure_callback, doc_md=doc_md, params=params, access_control=access_control, is_paused_upon_creation=is_paused_upon_creation, jinja_environment_kwargs=jinja_environment_kwargs, render_template_as_native_obj=render_template_as_native_obj, tags=tags, schedule=schedule, owner_links=owner_links, auto_register=auto_register, fail_stop=fail_stop, dag_display_name=dag_display_name, ) as dag_obj: # Set DAG documentation from function documentation if it exists and doc_md is not set. if f.__doc__ and not dag_obj.doc_md: dag_obj.doc_md = f.__doc__ # Generate DAGParam for each function arg/kwarg and replace it for calling the function. # All args/kwargs for function will be DAGParam object and replaced on execution time. f_kwargs = {} for name, value in f_sig.arguments.items(): f_kwargs[name] = dag_obj.param(name, value) # set file location to caller source path back = sys._getframe().f_back dag_obj.fileloc = back.f_code.co_filename if back else "" # Invoke function to create operators in the DAG scope. f(**f_kwargs) # Return dag object such that it's accessible in Globals. return dag_obj # Ensure that warnings from inside DAG() are emitted from the caller, not here fixup_decorator_warning_stack(factory) return factory return wrapper
Run a single task instance, and push result to Xcom for downstream tasks. Bypasses a lot of extra steps used in `task.run` to keep our local running as fast as possible. This function is only meant for the `dag.test` function as a helper function. Args: ti: TaskInstance to run
def _run_task(*, ti: TaskInstance, inline_trigger: bool = False, session: Session): """ Run a single task instance, and push result to Xcom for downstream tasks. Bypasses a lot of extra steps used in `task.run` to keep our local running as fast as possible. This function is only meant for the `dag.test` function as a helper function. Args: ti: TaskInstance to run """ log.info("[DAG TEST] starting task_id=%s map_index=%s", ti.task_id, ti.map_index) while True: try: log.info("[DAG TEST] running task %s", ti) ti._run_raw_task(session=session, raise_on_defer=inline_trigger) break except TaskDeferred as e: log.info("[DAG TEST] running trigger in line") event = _run_inline_trigger(e.trigger) ti.next_method = e.method_name ti.next_kwargs = {"event": event.payload} if event else e.kwargs log.info("[DAG TEST] Trigger completed") session.merge(ti) session.commit() log.info("[DAG TEST] end task task_id=%s map_index=%s", ti.task_id, ti.map_index)
Create a DAG run, replacing an existing instance if needed to prevent collisions. This function is only meant to be used by :meth:`DAG.test` as a helper function. :param dag: DAG to be used to find run. :param conf: Configuration to pass to newly created run. :param start_date: Start date of new run. :param execution_date: Logical date for finding an existing run. :param run_id: Run ID for the new DAG run. :return: The newly created DAG run.
def _get_or_create_dagrun( dag: DAG, conf: dict[Any, Any] | None, start_date: datetime, execution_date: datetime, run_id: str, session: Session, data_interval: tuple[datetime, datetime] | None = None, ) -> DagRun: """Create a DAG run, replacing an existing instance if needed to prevent collisions. This function is only meant to be used by :meth:`DAG.test` as a helper function. :param dag: DAG to be used to find run. :param conf: Configuration to pass to newly created run. :param start_date: Start date of new run. :param execution_date: Logical date for finding an existing run. :param run_id: Run ID for the new DAG run. :return: The newly created DAG run. """ log.info("dagrun id: %s", dag.dag_id) dr: DagRun = session.scalar( select(DagRun).where(DagRun.dag_id == dag.dag_id, DagRun.execution_date == execution_date) ) if dr: session.delete(dr) session.commit() dr = dag.create_dagrun( state=DagRunState.RUNNING, execution_date=execution_date, run_id=run_id, start_date=start_date or execution_date, session=session, conf=conf, data_interval=data_interval, ) log.info("created dagrun %s", dr) return dr
Creator for the ``note`` association proxy.
def _creator_note(val):
    """Creator for the ``note`` association proxy."""
    if isinstance(val, str):
        return DagRunNote(content=val)
    elif isinstance(val, dict):
        return DagRunNote(**val)
    else:
        return DagRunNote(*val)
Whether a task needs expansion at runtime. A task needs expansion if it either * Is a mapped operator, or * Is in a mapped task group. This is implemented as a free function (instead of a property) so we can make it a type guard.
def needs_expansion(task: AbstractOperator) -> TypeGuard[Operator]:
    """Whether a task needs expansion at runtime.

    A task needs expansion if it either

    * Is a mapped operator, or
    * Is in a mapped task group.

    This is implemented as a free function (instead of a property) so we can
    make it a type guard.
    """
    if isinstance(task, MappedOperator):
        return True
    if task.get_closest_mapped_task_group() is not None:
        return True
    return False
Merge, validate params, and convert them into a simple dict.
def process_params(
    dag: DAG,
    task: Operator,
    dag_run: DagRun | DagRunPydantic | None,
    *,
    suppress_exception: bool,
) -> dict[str, Any]:
    """Merge, validate params, and convert them into a simple dict."""
    from airflow.configuration import conf

    params = ParamsDict(suppress_exception=suppress_exception)
    with contextlib.suppress(AttributeError):
        params.update(dag.params)
    if task.params:
        params.update(task.params)
    if conf.getboolean("core", "dag_run_conf_overrides_params") and dag_run and dag_run.conf:
        logger.debug("Updating task params (%s) with DagRun.conf (%s)", params, dag_run.conf)
        params.update(dag_run.conf)
    return params.validate()
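The merge order above gives ``dag_run.conf`` the last word (when ``dag_run_conf_overrides_params`` is enabled), after DAG-level and then task-level params. A plain-dict sketch of the same precedence, independent of the ParamsDict machinery; the keys and values are made up.

dag_params = {"env": "dev", "retries": 1}
task_params = {"retries": 3}        # task-level params override DAG-level ones
dag_run_conf = {"env": "prod"}      # a triggered run's conf overrides both

merged = {**dag_params, **task_params, **dag_run_conf}
assert merged == {"env": "prod", "retries": 3}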
Get and serialize the template fields for a task. Used in preparing to store them in RTIF table. :param task: Operator instance with rendered template fields :meta private:
def get_serialized_template_fields(task: Operator):
    """
    Get and serialize the template fields for a task.

    Used in preparing to store them in RTIF table.

    :param task: Operator instance with rendered template fields

    :meta private:
    """
    return {field: serialize_template_field(getattr(task, field), field) for field in task.template_fields}
Set the current execution context to the provided context object. This method should be called once per Task execution, before calling operator.execute.
def set_current_context(context: Context) -> Generator[Context, None, None]:
    """
    Set the current execution context to the provided context object.

    This method should be called once per Task execution, before calling operator.execute.
    """
    _CURRENT_CONTEXT.append(context)
    try:
        yield context
    finally:
        expected_state = _CURRENT_CONTEXT.pop()
        if expected_state != context:
            log.warning(
                "Current context is not equal to the state at context stack. Expected=%s, got=%s",
                context,
                expected_state,
            )
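The generator above is consumed as a context manager (in the Airflow source it is presumably wrapped with ``contextlib.contextmanager``, which this dump does not show). The push/yield/pop-and-verify pattern can be sketched standalone with a plain list as the stack; all names below are illustrative.

import contextlib

_CONTEXT_STACK: list[dict] = []  # stand-in for Airflow's _CURRENT_CONTEXT


@contextlib.contextmanager
def _set_current(context: dict):
    """Push a context for the duration of a task execution, then pop and sanity-check it."""
    _CONTEXT_STACK.append(context)
    try:
        yield context
    finally:
        popped = _CONTEXT_STACK.pop()
        if popped is not context:
            print(f"context stack out of sync: expected {context}, got {popped}")


with _set_current({"ds": "2024-01-01"}) as ctx:
    assert _CONTEXT_STACK[-1] is ctx
assert not _CONTEXT_STACK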
Stop non-teardown tasks in dag. :meta private:
def _stop_remaining_tasks(*, task_instance: TaskInstance | TaskInstancePydantic, session: Session): """ Stop non-teardown tasks in dag. :meta private: """ if not task_instance.dag_run: raise ValueError("``task_instance`` must have ``dag_run`` set") tis = task_instance.dag_run.get_task_instances(session=session) if TYPE_CHECKING: assert task_instance.task assert isinstance(task_instance.task.dag, DAG) for ti in tis: if ti.task_id == task_instance.task_id or ti.state in ( TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, ): continue task = task_instance.task.dag.task_dict[ti.task_id] if not task.is_teardown: if ti.state == TaskInstanceState.RUNNING: log.info("Forcing task %s to fail due to dag's `fail_stop` setting", ti.task_id) ti.error(session) else: log.info("Setting task %s to SKIPPED due to dag's `fail_stop` setting.", ti.task_id) ti.set_state(state=TaskInstanceState.SKIPPED, session=session) else: log.info("Not skipping teardown task '%s'", ti.task_id)
Clear a set of task instances, but make sure the running ones get killed. Also sets Dagrun's `state` to QUEUED and `start_date` to the time of execution. But only for finished DRs (SUCCESS and FAILED). Doesn't clear DR's `state` and `start_date`for running DRs (QUEUED and RUNNING) because clearing the state for already running DR is redundant and clearing `start_date` affects DR's duration. :param tis: a list of task instances :param session: current session :param dag_run_state: state to set finished DagRuns to. If set to False, DagRuns state will not be changed. :param dag: DAG object :param activate_dag_runs: Deprecated parameter, do not pass
def clear_task_instances( tis: list[TaskInstance], session: Session, activate_dag_runs: None = None, dag: DAG | None = None, dag_run_state: DagRunState | Literal[False] = DagRunState.QUEUED, ) -> None: """ Clear a set of task instances, but make sure the running ones get killed. Also sets Dagrun's `state` to QUEUED and `start_date` to the time of execution. But only for finished DRs (SUCCESS and FAILED). Doesn't clear DR's `state` and `start_date`for running DRs (QUEUED and RUNNING) because clearing the state for already running DR is redundant and clearing `start_date` affects DR's duration. :param tis: a list of task instances :param session: current session :param dag_run_state: state to set finished DagRuns to. If set to False, DagRuns state will not be changed. :param dag: DAG object :param activate_dag_runs: Deprecated parameter, do not pass """ job_ids = [] # Keys: dag_id -> run_id -> map_indexes -> try_numbers -> task_id task_id_by_key: dict[str, dict[str, dict[int, dict[int, set[str]]]]] = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(set))) ) dag_bag = DagBag(read_dags_from_db=True) for ti in tis: if ti.state == TaskInstanceState.RUNNING: if ti.job_id: # If a task is cleared when running, set its state to RESTARTING so that # the task is terminated and becomes eligible for retry. ti.state = TaskInstanceState.RESTARTING job_ids.append(ti.job_id) else: ti_dag = dag if dag and dag.dag_id == ti.dag_id else dag_bag.get_dag(ti.dag_id, session=session) task_id = ti.task_id if ti_dag and ti_dag.has_task(task_id): task = ti_dag.get_task(task_id) ti.refresh_from_task(task) if TYPE_CHECKING: assert ti.task task_retries = task.retries ti.max_tries = ti.try_number + task_retries - 1 else: # Ignore errors when updating max_tries if the DAG or # task are not found since database records could be # outdated. We make max_tries the maximum value of its # original max_tries or the last attempted try number. ti.max_tries = max(ti.max_tries, ti.prev_attempted_tries) ti.state = None ti.external_executor_id = None ti.clear_next_method_args() session.merge(ti) task_id_by_key[ti.dag_id][ti.run_id][ti.map_index][ti.try_number].add(ti.task_id) if task_id_by_key: # Clear all reschedules related to the ti to clear # This is an optimization for the common case where all tis are for a small number # of dag_id, run_id, try_number, and map_index. Use a nested dict of dag_id, # run_id, try_number, map_index, and task_id to construct the where clause in a # hierarchical manner. This speeds up the delete statement by more than 40x for # large number of tis (50k+). conditions = or_( and_( TR.dag_id == dag_id, or_( and_( TR.run_id == run_id, or_( and_( TR.map_index == map_index, or_( and_(TR.try_number == try_number, TR.task_id.in_(task_ids)) for try_number, task_ids in task_tries.items() ), ) for map_index, task_tries in map_indexes.items() ), ) for run_id, map_indexes in run_ids.items() ), ) for dag_id, run_ids in task_id_by_key.items() ) delete_qry = TR.__table__.delete().where(conditions) session.execute(delete_qry) if job_ids: from airflow.jobs.job import Job session.execute(update(Job).where(Job.id.in_(job_ids)).values(state=JobState.RESTARTING)) if activate_dag_runs is not None: warnings.warn( "`activate_dag_runs` parameter to clear_task_instances function is deprecated. 
" "Please use `dag_run_state`", RemovedInAirflow3Warning, stacklevel=2, ) if not activate_dag_runs: dag_run_state = False if dag_run_state is not False and tis: from airflow.models.dagrun import DagRun # Avoid circular import run_ids_by_dag_id = defaultdict(set) for instance in tis: run_ids_by_dag_id[instance.dag_id].add(instance.run_id) drs = ( session.query(DagRun) .filter( or_( and_(DagRun.dag_id == dag_id, DagRun.run_id.in_(run_ids)) for dag_id, run_ids in run_ids_by_dag_id.items() ) ) .all() ) dag_run_state = DagRunState(dag_run_state) # Validate the state value. for dr in drs: if dr.state in State.finished_dr_states: dr.state = dag_run_state dr.start_date = timezone.utcnow() if dag_run_state == DagRunState.QUEUED: dr.last_scheduling_decision = None dr.start_date = None dr.clear_number += 1 session.flush()
Whether a value can be used for task mapping. We only allow collections with guaranteed ordering, but exclude character sequences since that's usually not what users would expect to be mappable.
def _is_mappable_value(value: Any) -> TypeGuard[Collection]:
    """Whether a value can be used for task mapping.

    We only allow collections with guaranteed ordering, but exclude character
    sequences since that's usually not what users would expect to be mappable.
    """
    if not isinstance(value, (collections.abc.Sequence, dict)):
        return False
    if isinstance(value, (bytearray, bytes, str)):
        return False
    return True
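A few concrete cases for the rule above (ordered sequences and dicts are mappable; strings, bytes, and unordered sets are not), assuming the function is importable.

assert _is_mappable_value([1, 2, 3]) is True          # list: ordered Sequence
assert _is_mappable_value(("a", "b")) is True         # tuple: ordered Sequence
assert _is_mappable_value({"x": 1, "y": 2}) is True   # dict: insertion-ordered since Python 3.7
assert _is_mappable_value("abc") is False             # str is a Sequence but is excluded explicitly
assert _is_mappable_value(b"abc") is False            # bytes likewise
assert _is_mappable_value({1, 2, 3}) is False         # set: no guaranteed ordering, not a Sequence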
Creator for the ``note`` association proxy.
def _creator_note(val):
    """Creator for the ``note`` association proxy."""
    if isinstance(val, str):
        return TaskInstanceNote(content=val)
    elif isinstance(val, dict):
        return TaskInstanceNote(**val)
    else:
        return TaskInstanceNote(*val)
Execute Task (optionally with a Timeout) and push Xcom results. :param task_instance: the task instance :param context: Jinja2 context :param task_orig: origin task :meta private:
def _execute_task(task_instance: TaskInstance | TaskInstancePydantic, context: Context, task_orig: Operator): """ Execute Task (optionally with a Timeout) and push Xcom results. :param task_instance: the task instance :param context: Jinja2 context :param task_orig: origin task :meta private: """ task_to_execute = task_instance.task if TYPE_CHECKING: assert task_to_execute if isinstance(task_to_execute, MappedOperator): raise AirflowException("MappedOperator cannot be executed.") # If the task has been deferred and is being executed due to a trigger, # then we need to pick the right method to come back to, otherwise # we go for the default execute execute_callable_kwargs: dict[str, Any] = {} execute_callable: Callable if task_instance.next_method: if task_instance.next_method == "execute": if not task_instance.next_kwargs: task_instance.next_kwargs = {} task_instance.next_kwargs[f"{task_to_execute.__class__.__name__}__sentinel"] = _sentinel execute_callable = task_to_execute.resume_execution execute_callable_kwargs["next_method"] = task_instance.next_method execute_callable_kwargs["next_kwargs"] = task_instance.next_kwargs else: execute_callable = task_to_execute.execute if execute_callable.__name__ == "execute": execute_callable_kwargs[f"{task_to_execute.__class__.__name__}__sentinel"] = _sentinel def _execute_callable(context: Context, **execute_callable_kwargs): try: # Print a marker for log grouping of details before task execution log.info("::endgroup::") return ExecutionCallableRunner( execute_callable, context_get_dataset_events(context), logger=log, ).run(context=context, **execute_callable_kwargs) except SystemExit as e: # Handle only successful cases here. Failure cases will be handled upper # in the exception chain. if e.code is not None and e.code != 0: raise return None finally: # Print a marker post execution for internals of post task processing log.info("::group::Post task execution logs") # If a timeout is specified for the task, make it fail # if it goes beyond if task_to_execute.execution_timeout: # If we are coming in with a next_method (i.e. from a deferral), # calculate the timeout from our start_date. if task_instance.next_method and task_instance.start_date: timeout_seconds = ( task_to_execute.execution_timeout - (timezone.utcnow() - task_instance.start_date) ).total_seconds() else: timeout_seconds = task_to_execute.execution_timeout.total_seconds() try: # It's possible we're already timed out, so fast-fail if true if timeout_seconds <= 0: raise AirflowTaskTimeout() # Run task in timeout wrapper with timeout(timeout_seconds): result = _execute_callable(context=context, **execute_callable_kwargs) except AirflowTaskTimeout: task_to_execute.on_kill() raise else: result = _execute_callable(context=context, **execute_callable_kwargs) cm = nullcontext() if InternalApiConfig.get_use_internal_api() else create_session() with cm as session_or_null: if task_to_execute.do_xcom_push: xcom_value = result else: xcom_value = None if xcom_value is not None: # If the task returns a result, push an XCom containing it. 
if task_to_execute.multiple_outputs: if not isinstance(xcom_value, Mapping): raise AirflowException( f"Returned output was type {type(xcom_value)} " "expected dictionary for multiple_outputs" ) for key in xcom_value.keys(): if not isinstance(key, str): raise AirflowException( "Returned dictionary keys must be strings when using " f"multiple_outputs, found {key} ({type(key)}) instead" ) for key, value in xcom_value.items(): task_instance.xcom_push(key=key, value=value, session=session_or_null) task_instance.xcom_push(key=XCOM_RETURN_KEY, value=xcom_value, session=session_or_null) _record_task_map_for_downstreams( task_instance=task_instance, task=task_orig, value=xcom_value, session=session_or_null ) return result
Refresh the task instance from the database based on the primary key. :param task_instance: the task instance :param session: SQLAlchemy ORM Session :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. :meta private:
def _refresh_from_db( *, task_instance: TaskInstance | TaskInstancePydantic, session: Session | None = None, lock_for_update: bool = False, ) -> None: """ Refresh the task instance from the database based on the primary key. :param task_instance: the task instance :param session: SQLAlchemy ORM Session :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. :meta private: """ if session and task_instance in session: session.refresh(task_instance, TaskInstance.__mapper__.column_attrs.keys()) ti = TaskInstance.get_task_instance( dag_id=task_instance.dag_id, task_id=task_instance.task_id, run_id=task_instance.run_id, map_index=task_instance.map_index, lock_for_update=lock_for_update, session=session, ) if ti: # Fields ordered per model definition task_instance.start_date = ti.start_date task_instance.end_date = ti.end_date task_instance.duration = ti.duration task_instance.state = ti.state task_instance.try_number = _get_private_try_number(task_instance=ti) task_instance.max_tries = ti.max_tries task_instance.hostname = ti.hostname task_instance.unixname = ti.unixname task_instance.job_id = ti.job_id task_instance.pool = ti.pool task_instance.pool_slots = ti.pool_slots or 1 task_instance.queue = ti.queue task_instance.priority_weight = ti.priority_weight task_instance.operator = ti.operator task_instance.custom_operator_name = ti.custom_operator_name task_instance.queued_dttm = ti.queued_dttm task_instance.queued_by_job_id = ti.queued_by_job_id task_instance.pid = ti.pid task_instance.executor = ti.executor task_instance.executor_config = ti.executor_config task_instance.external_executor_id = ti.external_executor_id task_instance.trigger_id = ti.trigger_id task_instance.next_method = ti.next_method task_instance.next_kwargs = ti.next_kwargs else: task_instance.state = None
def _set_duration(*, task_instance: TaskInstance | TaskInstancePydantic) -> None: """ Set task instance duration. :param task_instance: the task instance :meta private: """ if task_instance.end_date and task_instance.start_date: task_instance.duration = (task_instance.end_date - task_instance.start_date).total_seconds() else: task_instance.duration = None log.debug("Task Duration set to %s", task_instance.duration)
def _stats_tags(*, task_instance: TaskInstance | TaskInstancePydantic) -> dict[str, str]: """ Return task instance tags. :param task_instance: the task instance :meta private: """ return prune_dict({"dag_id": task_instance.dag_id, "task_id": task_instance.task_id})
def _clear_next_method_args(*, task_instance: TaskInstance | TaskInstancePydantic) -> None:
    """
    Unset next_method and next_kwargs so that any retries don't reuse them.

    :param task_instance: the task instance

    :meta private:
    """
    log.debug("Clearing next_method and next_kwargs.")
    task_instance.next_method = None
    task_instance.next_kwargs = None
def _get_template_context( *, task_instance: TaskInstance | TaskInstancePydantic, session: Session | None = None, ignore_param_exceptions: bool = True, ) -> Context: """ Return TI Context. :param task_instance: the task instance :param session: SQLAlchemy ORM Session :param ignore_param_exceptions: flag to suppress value exceptions while initializing the ParamsDict :meta private: """ # Do not use provide_session here -- it expunges everything on exit! if not session: session = settings.Session() from airflow import macros from airflow.models.abstractoperator import NotMapped integrate_macros_plugins() task = task_instance.task if TYPE_CHECKING: assert task_instance.task assert task assert task.dag try: dag: DAG = task.dag except AirflowException: from airflow.serialization.pydantic.taskinstance import TaskInstancePydantic if isinstance(task_instance, TaskInstancePydantic): ti = session.scalar( select(TaskInstance).where( TaskInstance.task_id == task_instance.task_id, TaskInstance.dag_id == task_instance.dag_id, TaskInstance.run_id == task_instance.run_id, TaskInstance.map_index == task_instance.map_index, ) ) dag = ti.dag_model.serialized_dag.dag if hasattr(task_instance.task, "_dag"): # BaseOperator task_instance.task._dag = dag else: # MappedOperator task_instance.task.dag = dag else: raise dag_run = task_instance.get_dagrun(session) data_interval = dag.get_run_data_interval(dag_run) validated_params = process_params(dag, task, dag_run, suppress_exception=ignore_param_exceptions) logical_date: DateTime = timezone.coerce_datetime(task_instance.execution_date) ds = logical_date.strftime("%Y-%m-%d") ds_nodash = ds.replace("-", "") ts = logical_date.isoformat() ts_nodash = logical_date.strftime("%Y%m%dT%H%M%S") ts_nodash_with_tz = ts.replace("-", "").replace(":", "") @cache # Prevent multiple database access. 
def _get_previous_dagrun_success() -> DagRun | None: return task_instance.get_previous_dagrun(state=DagRunState.SUCCESS, session=session) def _get_previous_dagrun_data_interval_success() -> DataInterval | None: dagrun = _get_previous_dagrun_success() if dagrun is None: return None return dag.get_run_data_interval(dagrun) def get_prev_data_interval_start_success() -> pendulum.DateTime | None: data_interval = _get_previous_dagrun_data_interval_success() if data_interval is None: return None return data_interval.start def get_prev_data_interval_end_success() -> pendulum.DateTime | None: data_interval = _get_previous_dagrun_data_interval_success() if data_interval is None: return None return data_interval.end def get_prev_start_date_success() -> pendulum.DateTime | None: dagrun = _get_previous_dagrun_success() if dagrun is None: return None return timezone.coerce_datetime(dagrun.start_date) def get_prev_end_date_success() -> pendulum.DateTime | None: dagrun = _get_previous_dagrun_success() if dagrun is None: return None return timezone.coerce_datetime(dagrun.end_date) @cache def get_yesterday_ds() -> str: return (logical_date - timedelta(1)).strftime("%Y-%m-%d") def get_yesterday_ds_nodash() -> str: return get_yesterday_ds().replace("-", "") @cache def get_tomorrow_ds() -> str: return (logical_date + timedelta(1)).strftime("%Y-%m-%d") def get_tomorrow_ds_nodash() -> str: return get_tomorrow_ds().replace("-", "") @cache def get_next_execution_date() -> pendulum.DateTime | None: # For manually triggered dagruns that aren't run on a schedule, # the "next" execution date doesn't make sense, and should be set # to execution date for consistency with how execution_date is set # for manually triggered tasks, i.e. triggered_date == execution_date. if dag_run.external_trigger: return logical_date if dag is None: return None next_info = dag.next_dagrun_info(data_interval, restricted=False) if next_info is None: return None return timezone.coerce_datetime(next_info.logical_date) def get_next_ds() -> str | None: execution_date = get_next_execution_date() if execution_date is None: return None return execution_date.strftime("%Y-%m-%d") def get_next_ds_nodash() -> str | None: ds = get_next_ds() if ds is None: return ds return ds.replace("-", "") @cache def get_prev_execution_date(): # For manually triggered dagruns that aren't run on a schedule, # the "previous" execution date doesn't make sense, and should be set # to execution date for consistency with how execution_date is set # for manually triggered tasks, i.e. triggered_date == execution_date. if dag_run.external_trigger: return logical_date with warnings.catch_warnings(): warnings.simplefilter("ignore", RemovedInAirflow3Warning) return dag.previous_schedule(logical_date) @cache def get_prev_ds() -> str | None: execution_date = get_prev_execution_date() if execution_date is None: return None return execution_date.strftime("%Y-%m-%d") def get_prev_ds_nodash() -> str | None: prev_ds = get_prev_ds() if prev_ds is None: return None return prev_ds.replace("-", "") def get_triggering_events() -> dict[str, list[DatasetEvent | DatasetEventPydantic]]: if TYPE_CHECKING: assert session is not None # The dag_run may not be attached to the session anymore since the # code base is over-zealous with use of session.expunge_all(). # Re-attach it if we get called. 
nonlocal dag_run if dag_run not in session: dag_run = session.merge(dag_run, load=False) dataset_events = dag_run.consumed_dataset_events triggering_events: dict[str, list[DatasetEvent | DatasetEventPydantic]] = defaultdict(list) for event in dataset_events: if event.dataset: triggering_events[event.dataset.uri].append(event) return triggering_events try: expanded_ti_count: int | None = task.get_mapped_ti_count(task_instance.run_id, session=session) except NotMapped: expanded_ti_count = None # NOTE: If you add to this dict, make sure to also update the following: # * Context in airflow/utils/context.pyi # * KNOWN_CONTEXT_KEYS in airflow/utils/context.py # * Table in docs/apache-airflow/templates-ref.rst context: dict[str, Any] = { "conf": conf, "dag": dag, "dag_run": dag_run, "data_interval_end": timezone.coerce_datetime(data_interval.end), "data_interval_start": timezone.coerce_datetime(data_interval.start), "dataset_events": DatasetEventAccessors(), "ds": ds, "ds_nodash": ds_nodash, "execution_date": logical_date, "expanded_ti_count": expanded_ti_count, "inlets": task.inlets, "logical_date": logical_date, "macros": macros, "map_index_template": task.map_index_template, "next_ds": get_next_ds(), "next_ds_nodash": get_next_ds_nodash(), "next_execution_date": get_next_execution_date(), "outlets": task.outlets, "params": validated_params, "prev_data_interval_start_success": get_prev_data_interval_start_success(), "prev_data_interval_end_success": get_prev_data_interval_end_success(), "prev_ds": get_prev_ds(), "prev_ds_nodash": get_prev_ds_nodash(), "prev_execution_date": get_prev_execution_date(), "prev_execution_date_success": task_instance.get_previous_execution_date( state=DagRunState.SUCCESS, session=session, ), "prev_start_date_success": get_prev_start_date_success(), "prev_end_date_success": get_prev_end_date_success(), "run_id": task_instance.run_id, "task": task, "task_instance": task_instance, "task_instance_key_str": f"{task.dag_id}__{task.task_id}__{ds_nodash}", "test_mode": task_instance.test_mode, "ti": task_instance, "tomorrow_ds": get_tomorrow_ds(), "tomorrow_ds_nodash": get_tomorrow_ds_nodash(), "triggering_dataset_events": lazy_object_proxy.Proxy(get_triggering_events), "ts": ts, "ts_nodash": ts_nodash, "ts_nodash_with_tz": ts_nodash_with_tz, "var": { "json": VariableAccessor(deserialize_json=True), "value": VariableAccessor(deserialize_json=False), }, "conn": ConnectionAccessor(), "yesterday_ds": get_yesterday_ds(), "yesterday_ds_nodash": get_yesterday_ds_nodash(), } # Mypy doesn't like turning existing dicts in to a TypeDict -- and we "lie" in the type stub to say it # is one, but in practice it isn't. See https://github.com/python/mypy/issues/8890 return Context(context)
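# Hedged sketch (task id and command are illustrative): the keys assembled above are what
# templated fields see at render time, e.g. ``ds``, ``run_id`` and the data interval bounds.
from airflow.operators.bash import BashOperator

print_interval = BashOperator(
    task_id="print_interval",
    bash_command="echo {{ run_id }}: {{ data_interval_start }} -> {{ data_interval_end }} ({{ ds }})",
)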
def _is_eligible_to_retry(*, task_instance: TaskInstance | TaskInstancePydantic):
    """
    Return whether the task instance is eligible for retry.

    :param task_instance: the task instance

    :meta private:
    """
    if task_instance.state == TaskInstanceState.RESTARTING:
        # If a task is cleared when running, it goes into RESTARTING state and is always
        # eligible for retry
        return True
    if not getattr(task_instance, "task", None):
        # Couldn't load the task, don't know number of retries, guess:
        return task_instance.try_number <= task_instance.max_tries

    if TYPE_CHECKING:
        assert task_instance.task

    return task_instance.task.retries and task_instance.try_number <= task_instance.max_tries
def _handle_failure( *, task_instance: TaskInstance | TaskInstancePydantic, error: None | str | BaseException, session: Session, test_mode: bool | None = None, context: Context | None = None, force_fail: bool = False, ) -> None: """ Handle Failure for a task instance. :param task_instance: the task instance :param error: if specified, log the specific exception if thrown :param session: SQLAlchemy ORM Session :param test_mode: doesn't record success or failure in the DB if True :param context: Jinja2 context :param force_fail: if True, task does not retry :meta private: """ if test_mode is None: test_mode = task_instance.test_mode failure_context = TaskInstance.fetch_handle_failure_context( ti=task_instance, error=error, test_mode=test_mode, context=context, force_fail=force_fail, session=session, ) _log_state(task_instance=task_instance, lead_msg="Immediate failure requested. " if force_fail else "") if ( failure_context["task"] and failure_context["email_for_state"](failure_context["task"]) and failure_context["task"].email ): try: task_instance.email_alert(error, failure_context["task"]) except Exception: log.exception("Failed to send email to: %s", failure_context["task"].email) if failure_context["callbacks"] and failure_context["context"]: _run_finished_callback( callbacks=failure_context["callbacks"], context=failure_context["context"], ) if not test_mode: TaskInstance.save_to_db(failure_context["ti"], session)
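# Hedged sketch (function and task names are illustrative): a task-level on_failure_callback
# ends up in failure_context["callbacks"] above and is invoked via _run_finished_callback.
from airflow.operators.bash import BashOperator


def _notify_on_failure(context):
    ti = context["ti"]
    print(f"Task {ti.task_id} in dag {ti.dag_id} failed (try {ti.try_number})")


always_fails = BashOperator(
    task_id="always_fails",
    bash_command="exit 1",
    on_failure_callback=_notify_on_failure,
)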
def _get_try_number(*, task_instance: TaskInstance):
    """
    Return the try number that this task instance will have when it is actually run.

    If the TaskInstance is currently running, this will match the column in the
    database, in all other cases this will be incremented.

    This is designed so that task logs end up in the right file.

    :param task_instance: the task instance

    :meta private:
    """
    if task_instance.state == TaskInstanceState.RUNNING:
        return task_instance._try_number
    return task_instance._try_number + 1
def _get_private_try_number(*, task_instance: TaskInstance | TaskInstancePydantic):
    """
    Opposite of _get_try_number.

    Given the value returned by try_number, return the value of _try_number that
    should produce the same result. This is needed for setting _try_number on
    TaskInstance from the value on TaskInstancePydantic, which has no private attrs.

    :param task_instance: the task instance

    :meta private:
    """
    if task_instance.state == TaskInstanceState.RUNNING:
        return task_instance.try_number
    return task_instance.try_number - 1
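# Worked example (values illustrative): for a running TI with _try_number == 2,
# _get_try_number returns 2 and _get_private_try_number maps the public value 2 back to 2;
# for a non-running TI with _try_number == 2, _get_try_number returns 3 and
# _get_private_try_number maps the public value 3 back to 2, so the round trip is stable.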
def _set_try_number(*, task_instance: TaskInstance | TaskInstancePydantic, value: int) -> None: """ Set a task try number. :param task_instance: the task instance :param value: the try number :meta private: """ task_instance._try_number = value
def _refresh_from_task(
    *, task_instance: TaskInstance | TaskInstancePydantic, task: Operator, pool_override: str | None = None
) -> None:
    """
    Copy common attributes from the given task.

    :param task_instance: the task instance
    :param task: The task object to copy from
    :param pool_override: Use the pool_override instead of task's pool

    :meta private:
    """
    task_instance.task = task
    task_instance.queue = task.queue
    task_instance.pool = pool_override or task.pool
    task_instance.pool_slots = task.pool_slots
    with contextlib.suppress(Exception):
        # This method is called from different places, and sometimes the TI is not fully initialized
        task_instance.priority_weight = task_instance.task.weight_rule.get_weight(
            task_instance  # type: ignore[arg-type]
        )
    task_instance.run_as_user = task.run_as_user
    # Do not set max_tries to task.retries here because max_tries is a cumulative
    # value that needs to be stored in the db.
    task_instance.executor = task.executor
    task_instance.executor_config = task.executor_config
    task_instance.operator = task.task_type
    task_instance.custom_operator_name = getattr(task, "custom_operator_name", None)
    # Re-apply the cluster policy here so that task defaults do not override previously set data
    task_instance_mutation_hook(task_instance)
def _record_task_map_for_downstreams( *, task_instance: TaskInstance | TaskInstancePydantic, task: Operator, value: Any, session: Session ) -> None: """ Record the task map for downstream tasks. :param task_instance: the task instance :param task: The task object :param value: The value :param session: SQLAlchemy ORM Session :meta private: """ if next(task.iter_mapped_dependants(), None) is None: # No mapped dependants, no need to validate. return # TODO: We don't push TaskMap for mapped task instances because it's not # currently possible for a downstream to depend on one individual mapped # task instance. This will change when we implement task mapping inside # a mapped task group, and we'll need to further analyze the case. if isinstance(task, MappedOperator): return if value is None: raise XComForMappingNotPushed() if not _is_mappable_value(value): raise UnmappableXComTypePushed(value) task_map = TaskMap.from_task_instance_xcom(task_instance, value) max_map_length = conf.getint("core", "max_map_length", fallback=1024) if task_map.length > max_map_length: raise UnmappableXComLengthPushed(value, max_map_length) session.merge(task_map)
def _get_previous_dagrun(
    *,
    task_instance: TaskInstance | TaskInstancePydantic,
    state: DagRunState | None = None,
    session: Session | None = None,
) -> DagRun | None:
    """
    Return the DagRun that ran prior to this task instance's DagRun.

    :param task_instance: the task instance
    :param state: If passed, it only takes into account instances of a specific state.
    :param session: SQLAlchemy ORM Session.

    :meta private:
    """
    if TYPE_CHECKING:
        assert task_instance.task
    dag = task_instance.task.dag
    if dag is None:
        return None

    dr = task_instance.get_dagrun(session=session)
    dr.dag = dag

    from airflow.models.dagrun import DagRun  # Avoid circular import

    # We always ignore schedule in dagrun lookup when `state` is given
    # or the DAG is never scheduled. For legacy reasons, when
    # `catchup=True`, we use `get_previous_scheduled_dagrun` unless
    # `ignore_schedule` is `True`.
    ignore_schedule = state is not None or not dag.timetable.can_be_scheduled
    if dag.catchup is True and not ignore_schedule:
        last_dagrun = DagRun.get_previous_scheduled_dagrun(dr.id, session=session)
    else:
        last_dagrun = DagRun.get_previous_dagrun(dag_run=dr, session=session, state=state)

    if last_dagrun:
        return last_dagrun

    return None
def _get_previous_execution_date(
    *,
    task_instance: TaskInstance | TaskInstancePydantic,
    state: DagRunState | None,
    session: Session,
) -> pendulum.DateTime | None:
    """
    Get execution date from property previous_ti_success.

    :param task_instance: the task instance
    :param session: SQLAlchemy ORM Session
    :param state: If passed, it only takes into account instances of a specific state.

    :meta private:
    """
    log.debug("previous_execution_date was called")
    prev_ti = task_instance.get_previous_ti(state=state, session=session)
    return pendulum.instance(prev_ti.execution_date) if prev_ti and prev_ti.execution_date else None
def _email_alert( *, task_instance: TaskInstance | TaskInstancePydantic, exception, task: BaseOperator ) -> None: """ Send alert email with exception information. :param task_instance: the task instance :param exception: the exception :param task: task related to the exception :meta private: """ subject, html_content, html_content_err = task_instance.get_email_subject_content(exception, task=task) if TYPE_CHECKING: assert task.email try: send_email(task.email, subject, html_content) except Exception: send_email(task.email, subject, html_content_err)
def _get_email_subject_content(
    *,
    task_instance: TaskInstance | TaskInstancePydantic,
    exception: BaseException,
    task: BaseOperator | None = None,
) -> tuple[str, str, str]:
    """
    Get the email subject content for exceptions.

    :param task_instance: the task instance
    :param exception: the exception sent in the email
    :param task: the task related to the exception, if available

    :meta private:
    """
    # For a ti from DB (without ti.task), return the default value
    if task is None:
        task = getattr(task_instance, "task")
    use_default = task is None
    exception_html = str(exception).replace("\n", "<br>")

    default_subject = "Airflow alert: {{ti}}"
    # For reporting purposes, we report based on 1-indexed,
    # not 0-indexed lists (i.e. Try 1 instead of
    # Try 0 for the first attempt).
    default_html_content = (
        "Try {{try_number}} out of {{max_tries + 1}}<br>"
        "Exception:<br>{{exception_html}}<br>"
        'Log: <a href="{{ti.log_url}}">Link</a><br>'
        "Host: {{ti.hostname}}<br>"
        'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
    )

    default_html_content_err = (
        "Try {{try_number}} out of {{max_tries + 1}}<br>"
        "Exception:<br>Failed attempt to attach error logs<br>"
        'Log: <a href="{{ti.log_url}}">Link</a><br>'
        "Host: {{ti.hostname}}<br>"
        'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
    )

    # This function is called after changing the state from RUNNING,
    # so we need to subtract 1 from the task instance's try_number here.
    current_try_number = task_instance.try_number - 1
    additional_context: dict[str, Any] = {
        "exception": exception,
        "exception_html": exception_html,
        "try_number": current_try_number,
        "max_tries": task_instance.max_tries,
    }

    if use_default:
        default_context = {"ti": task_instance, **additional_context}
        jinja_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=True
        )
        subject = jinja_env.from_string(default_subject).render(**default_context)
        html_content = jinja_env.from_string(default_html_content).render(**default_context)
        html_content_err = jinja_env.from_string(default_html_content_err).render(**default_context)
    else:
        if TYPE_CHECKING:
            assert task_instance.task
        # Use the DAG's get_template_env() to set force_sandboxed. Don't add
        # the flag to the function on the task object -- that function can be
        # overridden, and adding a flag breaks backward compatibility.
        dag = task_instance.task.get_dag()
        if dag:
            jinja_env = dag.get_template_env(force_sandboxed=True)
        else:
            jinja_env = SandboxedEnvironment(cache_size=0)
        jinja_context = task_instance.get_template_context()
        context_merge(jinja_context, additional_context)

        def render(key: str, content: str) -> str:
            if conf.has_option("email", key):
                path = conf.get_mandatory_value("email", key)
                try:
                    with open(path) as f:
                        content = f.read()
                except FileNotFoundError:
                    log.warning("Could not find email template file '%s'. Using defaults...", path)
                except OSError:
                    log.exception("Error while using email template %s. Using defaults...", path)
            return render_template_to_string(jinja_env.from_string(content), jinja_context)

        subject = render("subject_template", default_subject)
        html_content = render("html_content_template", default_html_content)
        html_content_err = render("html_content_template", default_html_content_err)

    return subject, html_content, html_content_err
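# Hedged sketch: the render() helper above reads optional template overrides from the
# [email] section of airflow.cfg; the paths below are hypothetical.
#
# [email]
# subject_template = /opt/airflow/templates/email_subject.j2
# html_content_template = /opt/airflow/templates/email_content.j2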
def _run_finished_callback( *, callbacks: None | TaskStateChangeCallback | list[TaskStateChangeCallback], context: Context, ) -> None: """ Run callback after task finishes. :param callbacks: callbacks to run :param context: callbacks context :meta private: """ if callbacks: callbacks = callbacks if isinstance(callbacks, list) else [callbacks] for callback in callbacks: log.info("Executing %s callback", callback.__name__) try: callback(context) except Exception: log.exception("Error when executing %s callback", callback.__name__)
def _log_state(*, task_instance: TaskInstance | TaskInstancePydantic, lead_msg: str = "") -> None: """ Log task state. :param task_instance: the task instance :param lead_msg: lead message :meta private: """ params = [ lead_msg, str(task_instance.state).upper(), task_instance.dag_id, task_instance.task_id, task_instance.run_id, ] message = "%sMarking task as %s. dag_id=%s, task_id=%s, run_id=%s, " if task_instance.map_index >= 0: params.append(task_instance.map_index) message += "map_index=%d, " message += "execution_date=%s, start_date=%s, end_date=%s" log.info( message, *params, _date_or_empty(task_instance=task_instance, attr="execution_date"), _date_or_empty(task_instance=task_instance, attr="start_date"), _date_or_empty(task_instance=task_instance, attr="end_date"), )
def _date_or_empty(*, task_instance: TaskInstance | TaskInstancePydantic, attr: str) -> str:
    """
    Fetch a date attribute and format it, or return an empty string if it does not exist.

    :param task_instance: the task instance
    :param attr: the attribute name

    :meta private:
    """
    result: datetime | None = getattr(task_instance, attr, None)
    return result.strftime("%Y%m%dT%H%M%S") if result else ""
def _get_previous_ti(
    *,
    task_instance: TaskInstance | TaskInstancePydantic,
    session: Session,
    state: DagRunState | None = None,
) -> TaskInstance | TaskInstancePydantic | None:
    """
    Get the task instance for the task that ran before this task instance.

    :param task_instance: the task instance
    :param state: If passed, it only takes into account instances of a specific state.
    :param session: SQLAlchemy ORM Session

    :meta private:
    """
    dagrun = task_instance.get_previous_dagrun(state, session=session)
    if dagrun is None:
        return None
    return dagrun.get_task_instance(task_instance.task_id, session=session)
def _find_common_ancestor_mapped_group(node1: Operator, node2: Operator) -> MappedTaskGroup | None: """Given two operators, find their innermost common mapped task group.""" if node1.dag is None or node2.dag is None or node1.dag_id != node2.dag_id: return None parent_group_ids = {g.group_id for g in node1.iter_mapped_task_groups()} common_groups = (g for g in node2.iter_mapped_task_groups() if g.group_id in parent_group_ids) return next(common_groups, None)
def _is_further_mapped_inside(operator: Operator, container: TaskGroup) -> bool:
    """Whether the given operator is *further* mapped inside a task group."""
    if isinstance(operator, MappedOperator):
        return True
    task_group = operator.task_group
    while task_group is not None and task_group.group_id != container.group_id:
        if isinstance(task_group, MappedTaskGroup):
            return True
        task_group = task_group.parent_group
    return False
def _patch_outdated_serializer(clazz: type[BaseXCom], params: Iterable[str]) -> None:
    """Patch a custom ``serialize_value`` to accept the modern signature.

    To give custom XCom backends more flexibility with how they store values, we now
    forward all params passed to ``XCom.set`` to ``XCom.serialize_value``. In order to
    maintain compatibility with custom XCom backends written with the old signature, we
    check the signature and, if necessary, patch with a method that ignores kwargs the
    backend does not accept.
    """
    old_serializer = clazz.serialize_value

    @wraps(old_serializer)
    def _shim(**kwargs):
        kwargs = {k: kwargs.get(k) for k in params}
        warnings.warn(
            f"Method `serialize_value` in XCom backend {XCom.__name__} is using an outdated signature and "
            f"must be updated to accept all params in `BaseXCom.set` except `session`. Support will be "
            f"removed in a future release.",
            RemovedInAirflow3Warning,
            stacklevel=1,
        )
        return old_serializer(**kwargs)

    clazz.serialize_value = _shim
def _get_function_params(function) -> list[str]:
    """
    Return the list of parameter names of a function.

    :param function: The function to inspect
    """
    parameters = inspect.signature(function).parameters
    bound_arguments = [
        name for name, p in parameters.items() if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
    ]
    return bound_arguments
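# Worked example (hedged; the sample function is hypothetical): *args and **kwargs are
# excluded from the reported parameter names.
def _sample(a, b, *args, c=1, **kwargs):
    ...


assert _get_function_params(_sample) == ["a", "b", "c"]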
def resolve_xcom_backend() -> type[BaseXCom]: """Resolve custom XCom class. Confirm that custom XCom class extends the BaseXCom. Compare the function signature of the custom XCom serialize_value to the base XCom serialize_value. """ clazz = conf.getimport("core", "xcom_backend", fallback=f"airflow.models.xcom.{BaseXCom.__name__}") if not clazz: return BaseXCom if not issubclass(clazz, BaseXCom): raise TypeError( f"Your custom XCom class `{clazz.__name__}` is not a subclass of `{BaseXCom.__name__}`." ) base_xcom_params = _get_function_params(BaseXCom.serialize_value) xcom_params = _get_function_params(clazz.serialize_value) if set(base_xcom_params) != set(xcom_params): _patch_outdated_serializer(clazz=clazz, params=xcom_params) return clazz
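# Hedged sketch of a custom XCom backend using the modern signature (class and module names
# are hypothetical); it would be configured via ``[core] xcom_backend = my_pkg.xcom.LoggingXCom``.
# Accepting every keyword forwarded by ``BaseXCom.set`` avoids the compatibility shim above.
import logging

logger = logging.getLogger(__name__)


class LoggingXCom(BaseXCom):
    @staticmethod
    def serialize_value(
        value,
        *,
        key=None,
        task_id=None,
        dag_id=None,
        run_id=None,
        map_index=None,
    ):
        serialized = BaseXCom.serialize_value(value)
        logger.debug("Serialized XCom %s for task %s, payload length %s", key, task_id, len(serialized))
        return serialized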
Try to "describe" a callable by getting its name.
def _get_callable_name(f: Callable | str) -> str: """Try to "describe" a callable by getting its name.""" if callable(f): return f.__name__ # Parse the source to find whatever is behind "def". For safety, we don't # want to evaluate the code in any meaningful way! with contextlib.suppress(Exception): kw, name, _ = f.lstrip().split(None, 2) if kw == "def": return name return "<function>"
def serialize_xcom_arg(value: XComArg) -> dict[str, Any]: """DAG serialization interface.""" key = next(k for k, v in _XCOM_ARG_TYPES.items() if v == type(value)) if key: return {"type": key, **value._serialize()} return value._serialize()
def deserialize_xcom_arg(data: dict[str, Any], dag: DAG) -> XComArg: """DAG serialization interface.""" klass = _XCOM_ARG_TYPES[data.get("type", "")] return klass._deserialize(data, dag)
def target_times_as_dates( base_date: datetime.datetime, lower: datetime.datetime | datetime.time | None, upper: datetime.datetime | datetime.time | None, ): """Ensure upper and lower time targets are datetimes by combining them with base_date.""" if isinstance(lower, datetime.datetime) and isinstance(upper, datetime.datetime): return lower, upper if lower is not None and isinstance(lower, datetime.time): lower = datetime.datetime.combine(base_date, lower) if upper is not None and isinstance(upper, datetime.time): upper = datetime.datetime.combine(base_date, upper) if lower is None or upper is None: return lower, upper if upper < lower: upper += datetime.timedelta(days=1) return lower, upper
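# Worked example (hedged; values illustrative): times are combined with the base date and
# the upper bound is rolled forward a day when it would otherwise precede the lower bound.
_base = datetime.datetime(2024, 1, 1)
_lower, _upper = target_times_as_dates(_base, datetime.time(23, 0), datetime.time(1, 0))
assert _lower == datetime.datetime(2024, 1, 1, 23, 0)
assert _upper == datetime.datetime(2024, 1, 2, 1, 0)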
def is_venv_installed() -> bool:
    """
    Check whether the virtualenv package is available, either on the path or installed as a package.

    :return: True if either check finds it.
    """
    if shutil.which("virtualenv") or importlib.util.find_spec("virtualenv"):
        return True
    return False
def task(python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs):
    """Use :func:`airflow.decorators.task` instead; this is deprecated.

    Calls ``@task.python`` and allows users to turn a Python function into
    an Airflow task.

    :param python_callable: A reference to an object that is callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function (templated)
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable (templated)
    :param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
        Defaults to False.
    """
    # To maintain backwards compatibility, we import the task object into this file
    # This prevents breakages in dags that use `from airflow.operators.python import task`
    from airflow.decorators.python import python_task

    warnings.warn(
        """airflow.operators.python.task is deprecated. Please use the following instead

        from airflow.decorators import task

        @task
        def my_task()""",
        RemovedInAirflow3Warning,
        stacklevel=2,
    )
    return python_task(python_callable=python_callable, multiple_outputs=multiple_outputs, **kwargs)
def _parse_version_info(text: str) -> tuple[int, int, int, str, int]: """Parse python version info from a text.""" parts = text.strip().split(".") if len(parts) != 5: msg = f"Invalid Python version info, expected 5 components separated by '.', but got {text!r}." raise ValueError(msg) try: return int(parts[0]), int(parts[1]), int(parts[2]), parts[3], int(parts[4]) except ValueError: msg = f"Unable to convert parts {parts} parsed from {text!r} to (int, int, int, str, int)." raise ValueError(msg) from None
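# Worked example (hedged): the expected text mirrors sys.version_info joined with dots.
assert _parse_version_info("3.11.8.final.0") == (3, 11, 8, "final", 0)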
def get_current_context() -> Context:
    """
    Retrieve the execution context dictionary without altering user method's signature.

    This is the simplest method of retrieving the execution context dictionary.

    **Old style:**

    .. code:: python

        def my_task(**context):
            ti = context["ti"]

    **New style:**

    .. code:: python

        from airflow.operators.python import get_current_context


        def my_task():
            context = get_current_context()
            ti = context["ti"]

    The current context only has a value if this method is called after an operator
    has started to execute.
    """
    if not _CURRENT_CONTEXT:
        raise AirflowException(
            "Current context was requested but no context was found! "
            "Are you running within an airflow task?"
        )
    return _CURRENT_CONTEXT[-1]
def provide_bucket_name(func: T) -> T: """Unify bucket name and key if a key is provided but not a bucket name.""" function_signature = signature(func) @wraps(func) def wrapper(*args, **kwargs) -> T: bound_args = function_signature.bind(*args, **kwargs) self = args[0] if bound_args.arguments.get("bucket_name") is None and self.oss_conn_id: connection = self.get_connection(self.oss_conn_id) if connection.schema: bound_args.arguments["bucket_name"] = connection.schema return func(*bound_args.args, **bound_args.kwargs) return cast(T, wrapper)
def unify_bucket_name_and_key(func: T) -> T: """Unify bucket name and key if a key is provided but not a bucket name.""" function_signature = signature(func) @wraps(func) def wrapper(*args, **kwargs) -> T: bound_args = function_signature.bind(*args, **kwargs) def get_key() -> str: if "key" in bound_args.arguments: return "key" raise ValueError("Missing key parameter!") key_name = get_key() if bound_args.arguments.get("bucket_name") is None: bound_args.arguments["bucket_name"], bound_args.arguments["key"] = OSSHook.parse_oss_url( bound_args.arguments[key_name] ) return func(*bound_args.args, **bound_args.kwargs) return cast(T, wrapper)
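# Hedged sketch (bucket and key are illustrative): parse_oss_url, used by the decorator above,
# splits a full OSS URL into its bucket name and object key.
bucket_name, key = OSSHook.parse_oss_url("oss://my-bucket/path/to/file.txt")
# bucket_name == "my-bucket", key == "path/to/file.txt"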