Apply Add ``creating_job_id`` to ``DagRun`` table
def upgrade(): """Apply Add ``creating_job_id`` to ``DagRun`` table""" op.add_column("dag_run", sa.Column("creating_job_id", sa.Integer))
Unapply Add ``creating_job_id`` to ``DagRun`` table
def downgrade():
    """Unapply Add ``creating_job_id`` to ``DagRun`` table"""
    op.drop_column("dag_run", "creating_job_id")
Apply add-k8s-yaml-to-rendered-templates
def upgrade(): """Apply add-k8s-yaml-to-rendered-templates""" with op.batch_alter_table(__tablename__, schema=None) as batch_op: batch_op.add_column(k8s_pod_yaml)
Unapply add-k8s-yaml-to-rendered-templates
def downgrade(): """Unapply add-k8s-yaml-to-rendered-templates""" with op.batch_alter_table(__tablename__, schema=None) as batch_op: batch_op.drop_column("k8s_pod_yaml")
Apply Map Airflow permissions.
def remap_permissions():
    """Apply Map Airflow permissions."""
    appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder
    for old, new in mapping.items():
        (old_resource_name, old_action_name) = old
        old_permission = appbuilder.sm.get_permission(old_action_name, old_resource_name)
        if not old_permission:
            continue
        for new_action_name, new_resource_name in new:
            new_permission = appbuilder.sm.create_permission(new_action_name, new_resource_name)
            for role in appbuilder.sm.get_all_roles():
                if appbuilder.sm.permission_exists_in_one_or_more_roles(
                    old_resource_name, old_action_name, [role.id]
                ):
                    appbuilder.sm.add_permission_to_role(role, new_permission)
                    appbuilder.sm.remove_permission_from_role(role, old_permission)
        appbuilder.sm.delete_permission(old_action_name, old_resource_name)
        if not appbuilder.sm.get_action(old_action_name):
            continue
        resources = appbuilder.sm.get_all_resources()
        if not any(appbuilder.sm.get_permission(old_action_name, resource.name) for resource in resources):
            appbuilder.sm.delete_action(old_action_name)
Unapply Map Airflow permissions
def undo_remap_permissions():
    """Unapply Map Airflow permissions"""
    appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder
    for old, new in mapping.items():
        (new_resource_name, new_action_name) = new[0]
        new_permission = appbuilder.sm.get_permission(new_action_name, new_resource_name)
        if not new_permission:
            continue
        for old_resource_name, old_action_name in old:
            old_permission = appbuilder.sm.create_permission(old_action_name, old_resource_name)
            for role in appbuilder.sm.get_all_roles():
                if appbuilder.sm.permission_exists_in_one_or_more_roles(
                    new_resource_name, new_action_name, [role.id]
                ):
                    appbuilder.sm.add_permission_to_role(role, old_permission)
                    appbuilder.sm.remove_permission_from_role(role, new_permission)
        appbuilder.sm.delete_permission(new_action_name, new_resource_name)
        if not appbuilder.sm.get_action(new_action_name):
            continue
        resources = appbuilder.sm.get_all_resources()
        if not any(appbuilder.sm.get_permission(new_action_name, resource.name) for resource in resources):
            appbuilder.sm.delete_action(new_action_name)
Apply Resource based permissions.
def upgrade(): """Apply Resource based permissions.""" log = logging.getLogger() handlers = log.handlers[:] remap_permissions() log.handlers = handlers
Unapply Resource based permissions.
def downgrade(): """Unapply Resource based permissions.""" log = logging.getLogger() handlers = log.handlers[:] undo_remap_permissions() log.handlers = handlers
Apply Add description field to ``connection`` table
def upgrade(): """Apply Add description field to ``connection`` table""" conn = op.get_bind() with op.batch_alter_table("connection") as batch_op: if conn.dialect.name == "mysql": # Handles case where on mysql with utf8mb4 this would exceed the size of row # We have to set text type in this migration even if originally it was string # This is permanently fixed in the follow-up migration 64a7d6477aae batch_op.add_column(sa.Column("description", sa.Text(length=5000), nullable=True)) else: batch_op.add_column(sa.Column("description", sa.String(length=5000), nullable=True))
Unapply Add description field to ``connection`` table
def downgrade(): """Unapply Add description field to ``connection`` table""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.drop_column("description")
Apply Fix description field in ``connection`` to be ``text``
def upgrade(): """Apply Fix description field in ``connection`` to be ``text``""" conn = op.get_bind() if conn.dialect.name == "sqlite": # in sqlite TEXT and STRING column types are the same return if conn.dialect.name == "mysql": op.alter_column( "connection", "description", existing_type=sa.String(length=5000), type_=sa.Text(length=5000), existing_nullable=True, ) else: # postgres does not allow size modifier for text type op.alter_column("connection", "description", existing_type=sa.String(length=5000), type_=sa.Text())
Unapply Fix description field in ``connection`` to be ``text``
def downgrade(): """Unapply Fix description field in ``connection`` to be ``text``""" conn = op.get_bind() if conn.dialect.name == "sqlite": # in sqlite TEXT and STRING column types are the same return if conn.dialect.name == "mysql": op.alter_column( "connection", "description", existing_type=sa.Text(5000), type_=sa.String(length=5000), existing_nullable=True, ) else: # postgres does not allow size modifier for text type op.alter_column( "connection", "description", existing_type=sa.Text(), type_=sa.String(length=5000), existing_nullable=True, )
Remove can_read action from config resource for User and Viewer role
def upgrade(): """Remove can_read action from config resource for User and Viewer role""" log = logging.getLogger() handlers = log.handlers[:] appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder roles_to_modify = [role for role in appbuilder.sm.get_all_roles() if role.name in ["User", "Viewer"]] can_read_on_config_perm = appbuilder.sm.get_permission( permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG ) for role in roles_to_modify: if appbuilder.sm.permission_exists_in_one_or_more_roles( permissions.RESOURCE_CONFIG, permissions.ACTION_CAN_READ, [role.id] ): appbuilder.sm.remove_permission_from_role(role, can_read_on_config_perm) log.handlers = handlers
Add can_read action on config resource for User and Viewer role
def downgrade(): """Add can_read action on config resource for User and Viewer role""" appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder roles_to_modify = [role for role in appbuilder.sm.get_all_roles() if role.name in ["User", "Viewer"]] can_read_on_config_perm = appbuilder.sm.get_permission( permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG ) for role in roles_to_modify: if not appbuilder.sm.permission_exists_in_one_or_more_roles( permissions.RESOURCE_CONFIG, permissions.ACTION_CAN_READ, [role.id] ): appbuilder.sm.add_permission_to_role(role, can_read_on_config_perm)
Apply increase_length_for_connection_password
def upgrade(): """Apply increase_length_for_connection_password""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "extra", existing_type=sa.VARCHAR(length=5000), type_=sa.TEXT(), existing_nullable=True, )
Unapply increase_length_for_connection_password
def downgrade(): """Unapply increase_length_for_connection_password""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "extra", existing_type=sa.TEXT(), type_=sa.VARCHAR(length=5000), existing_nullable=True, )
Change default ``pool_slots`` to ``1`` and make pool_slots not nullable
def upgrade(): """Change default ``pool_slots`` to ``1`` and make pool_slots not nullable""" op.execute("UPDATE task_instance SET pool_slots = 1 WHERE pool_slots IS NULL") with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False, server_default="1")
Unapply Change default ``pool_slots`` to ``1``
def downgrade(): """Unapply Change default ``pool_slots`` to ``1``""" conn = op.get_bind() if conn.dialect.name == "mssql": inspector = sa.inspect(conn.engine) columns = inspector.get_columns("task_instance") for col in columns: if col["name"] == "pool_slots" and col["default"] == "('1')": with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column( "pool_slots", existing_type=sa.Integer, nullable=True, server_default=None ) else: with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=True, server_default=None)
Apply Rename ``last_scheduler_run`` column in ``DAG`` table to ``last_parsed_time``
def upgrade(): """Apply Rename ``last_scheduler_run`` column in ``DAG`` table to ``last_parsed_time``""" conn = op.get_bind() if conn.dialect.name == "mssql": with op.batch_alter_table("dag") as batch_op: batch_op.alter_column( "last_scheduler_run", new_column_name="last_parsed_time", type_=mssql.DATETIME2(precision=6) ) else: with op.batch_alter_table("dag") as batch_op: batch_op.alter_column( "last_scheduler_run", new_column_name="last_parsed_time", type_=sa.TIMESTAMP(timezone=True) )
Unapply Rename ``last_scheduler_run`` column in ``DAG`` table to ``last_parsed_time``
def downgrade(): """Unapply Rename ``last_scheduler_run`` column in ``DAG`` table to ``last_parsed_time``""" conn = op.get_bind() if conn.dialect.name == "mssql": with op.batch_alter_table("dag") as batch_op: batch_op.alter_column( "last_parsed_time", new_column_name="last_scheduler_run", type_=mssql.DATETIME2(precision=6) ) else: with op.batch_alter_table("dag") as batch_op: batch_op.alter_column( "last_parsed_time", new_column_name="last_scheduler_run", type_=sa.TIMESTAMP(timezone=True) )
Apply Increase maximum length of pool name in ``task_instance`` table to ``256`` characters
def upgrade(): """Apply Increase maximum length of pool name in ``task_instance`` table to ``256`` characters""" with op.batch_alter_table("task_instance") as batch_op: batch_op.alter_column("pool", type_=sa.String(256), nullable=False)
Unapply Increase maximum length of pool name in ``task_instance`` table to ``256`` characters
def downgrade(): """Unapply Increase maximum length of pool name in ``task_instance`` table to ``256`` characters""" conn = op.get_bind() if conn.dialect.name == "mssql": with op.batch_alter_table("task_instance") as batch_op: batch_op.drop_index("ti_pool") batch_op.alter_column("pool", type_=sa.String(50), nullable=False) batch_op.create_index("ti_pool", ["pool"]) else: with op.batch_alter_table("task_instance") as batch_op: batch_op.alter_column("pool", type_=sa.String(50), nullable=False)
Apply Add description field to ``Variable`` model
def upgrade(): """Apply Add description field to ``Variable`` model""" with op.batch_alter_table("variable", schema=None) as batch_op: batch_op.add_column(sa.Column("description", sa.Text(), nullable=True))
Unapply Add description field to ``Variable`` model
def downgrade(): """Unapply Add description field to ``Variable`` model""" with op.batch_alter_table("variable", schema=None) as batch_op: batch_op.drop_column("description")
Apply Map Airflow permissions.
def remap_permissions():
    """Apply Map Airflow permissions."""
    appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder
    for old, new in mapping.items():
        (old_resource_name, old_action_name) = old
        old_permission = appbuilder.sm.get_permission(old_action_name, old_resource_name)
        if not old_permission:
            continue
        for new_action_name, new_resource_name in new:
            new_permission = appbuilder.sm.create_permission(new_action_name, new_resource_name)
            for role in appbuilder.sm.get_all_roles():
                if appbuilder.sm.permission_exists_in_one_or_more_roles(
                    old_resource_name, old_action_name, [role.id]
                ):
                    appbuilder.sm.add_permission_to_role(role, new_permission)
                    appbuilder.sm.remove_permission_from_role(role, old_permission)
        appbuilder.sm.delete_permission(old_action_name, old_resource_name)
        if not appbuilder.sm.get_action(old_action_name):
            continue
        resources = appbuilder.sm.get_all_resources()
        if not any(appbuilder.sm.get_permission(old_action_name, resource.name) for resource in resources):
            appbuilder.sm.delete_action(old_action_name)
Unapply Map Airflow permissions
def undo_remap_permissions():
    """Unapply Map Airflow permissions"""
    appbuilder = cached_app(config={"FAB_UPDATE_PERMS": False}).appbuilder
    for old, new in mapping.items():
        (new_resource_name, new_action_name) = new[0]
        new_permission = appbuilder.sm.get_permission(new_action_name, new_resource_name)
        if not new_permission:
            continue
        for old_action_name, old_resource_name in old:
            old_permission = appbuilder.sm.create_permission(old_action_name, old_resource_name)
            for role in appbuilder.sm.get_all_roles():
                if appbuilder.sm.permission_exists_in_one_or_more_roles(
                    new_resource_name, new_action_name, [role.id]
                ):
                    appbuilder.sm.add_permission_to_role(role, old_permission)
                    appbuilder.sm.remove_permission_from_role(role, new_permission)
        appbuilder.sm.delete_permission(new_action_name, new_resource_name)
        if not appbuilder.sm.get_action(new_action_name):
            continue
        resources = appbuilder.sm.get_all_resources()
        if not any(appbuilder.sm.get_permission(new_action_name, resource.name) for resource in resources):
            appbuilder.sm.delete_action(new_action_name)
Apply Resource based permissions.
def upgrade(): """Apply Resource based permissions.""" log = logging.getLogger() handlers = log.handlers[:] remap_permissions() log.handlers = handlers
Unapply Resource based permissions.
def downgrade(): """Unapply Resource based permissions.""" log = logging.getLogger() handlers = log.handlers[:] undo_remap_permissions() log.handlers = handlers
Apply Add ``queued_at`` column in ``dag_run`` table
def upgrade(): """Apply Add ``queued_at`` column in ``dag_run`` table""" op.add_column("dag_run", sa.Column("queued_at", TIMESTAMP, nullable=True))
Unapply Add ``queued_at`` column in ``dag_run`` table
def downgrade(): """Unapply Add ``queued_at`` column in ``dag_run`` table""" with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_column("queued_at")
Apply Add ``max_active_runs`` column to ``dag_model`` table
def upgrade(): """Apply Add ``max_active_runs`` column to ``dag_model`` table""" op.add_column("dag", sa.Column("max_active_runs", sa.Integer(), nullable=True)) with op.batch_alter_table("dag_run", schema=None) as batch_op: # Add index to dag_run.dag_id and also add index to dag_run.state where state==running batch_op.create_index("idx_dag_run_dag_id", ["dag_id"]) batch_op.create_index( "idx_dag_run_running_dags", ["state", "dag_id"], postgresql_where=text("state='running'"), mssql_where=text("state='running'"), sqlite_where=text("state='running'"), )
Unapply Add ``max_active_runs`` column to ``dag_model`` table
def downgrade(): """Unapply Add ``max_active_runs`` column to ``dag_model`` table""" with op.batch_alter_table("dag") as batch_op: batch_op.drop_column("max_active_runs") with op.batch_alter_table("dag_run", schema=None) as batch_op: # Drop index to dag_run.dag_id and also drop index to dag_run.state where state==running batch_op.drop_index("idx_dag_run_dag_id") batch_op.drop_index("idx_dag_run_running_dags")
Apply Add index on state, dag_id for queued ``dagrun``
def upgrade(): """Apply Add index on state, dag_id for queued ``dagrun``""" with op.batch_alter_table("dag_run") as batch_op: batch_op.create_index( "idx_dag_run_queued_dags", ["state", "dag_id"], postgresql_where=text("state='queued'"), mssql_where=text("state='queued'"), sqlite_where=text("state='queued'"), )
Unapply Add index on state, dag_id for queued ``dagrun``
def downgrade(): """Unapply Add index on state, dag_id for queued ``dagrun``""" with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_index("idx_dag_run_queued_dags")
This function checks if the MS SQL table is empty :param conn: SQL connection object :param table_name: table name :return: Boolean indicating if the table is empty
def is_table_empty(conn, table_name):
    """
    This function checks if the MS SQL table is empty

    :param conn: SQL connection object
    :param table_name: table name
    :return: Boolean indicating if the table is empty
    """
    return conn.execute(text(f"select TOP 1 * from {table_name}")).first() is None
This function returns primary and unique constraints along with column names. Some tables, like task_instance, are missing the primary key constraint name, which is auto-generated by SQL Server, so this function helps to retrieve any primary or unique constraint name. :param conn: sql connection object :param table_name: table name :return: a dictionary of ((constraint name, constraint type), column name) of table
def get_table_constraints(conn, table_name) -> dict[tuple[str, str], list[str]]:
    """
    This function returns primary and unique constraints along with column names.
    Some tables, like task_instance, are missing the primary key constraint name,
    which is auto-generated by SQL Server, so this function helps to retrieve any
    primary or unique constraint name.

    :param conn: sql connection object
    :param table_name: table name
    :return: a dictionary of ((constraint name, constraint type), column name) of table
    """
    query = text(
        f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
        FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
        JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
        WHERE tc.TABLE_NAME = '{table_name}' AND
        (tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE')
        """
    )
    result = conn.execute(query).fetchall()
    constraint_dict = defaultdict(list)
    for constraint, constraint_type, column in result:
        constraint_dict[(constraint, constraint_type)].append(column)
    return constraint_dict
Drop a primary key or unique constraint :param operator: batch_alter_table for the table :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
def drop_column_constraints(operator, column_name, constraint_dict):
    """
    Drop a primary key or unique constraint

    :param operator: batch_alter_table for the table
    :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
    """
    for constraint, columns in constraint_dict.items():
        if column_name in columns:
            if constraint[1].lower().startswith("primary"):
                operator.drop_constraint(constraint[0], type_="primary")
            elif constraint[1].lower().startswith("unique"):
                operator.drop_constraint(constraint[0], type_="unique")
Create a primary key or unique constraint :param operator: batch_alter_table for the table :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
def create_constraints(operator, column_name, constraint_dict):
    """
    Create a primary key or unique constraint

    :param operator: batch_alter_table for the table
    :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
    """
    for constraint, columns in constraint_dict.items():
        if column_name in columns:
            if constraint[1].lower().startswith("primary"):
                operator.create_primary_key(constraint_name=constraint[0], columns=columns)
            elif constraint[1].lower().startswith("unique"):
                operator.create_unique_constraint(constraint_name=constraint[0], columns=columns)
Drop the timestamp column and recreate it as datetime or datetime2(6)
def recreate_mssql_ts_column(conn, op, table_name, column_name):
    """
    Drop the timestamp column and recreate it as datetime or datetime2(6)
    """
    if _is_timestamp(conn, table_name, column_name) and is_table_empty(conn, table_name):
        with op.batch_alter_table(table_name) as batch_op:
            constraint_dict = get_table_constraints(conn, table_name)
            drop_column_constraints(batch_op, column_name, constraint_dict)
            batch_op.drop_column(column_name=column_name)
            batch_op.add_column(sa.Column(column_name, TIMESTAMP, nullable=False))
            create_constraints(batch_op, column_name, constraint_dict)
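``recreate_mssql_ts_column`` also calls an ``_is_timestamp`` helper that is not shown here; a minimal sketch of such a check (an assumption, querying INFORMATION_SCHEMA the same way the constraint helper does) could be:
def _is_timestamp(conn, table_name, column_name):
    # Assumed helper: report whether the column currently uses the legacy
    # MSSQL TIMESTAMP/ROWVERSION type, via INFORMATION_SCHEMA.COLUMNS.
    query = text(
        f"""SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS
        WHERE TABLE_NAME = '{table_name}' AND COLUMN_NAME = '{column_name}'"""
    )
    row = conn.execute(query).first()
    return row is not None and row[0].lower() == "timestamp"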
Update the datetime column to datetime2(6)
def alter_mssql_datetime_column(conn, op, table_name, column_name, nullable):
    """Update the datetime column to datetime2(6)"""
    op.alter_column(
        table_name=table_name,
        column_name=column_name,
        type_=mssql.DATETIME2(precision=6),
        nullable=nullable,
    )
Improve compatibility with MSSQL backend
def upgrade(): """Improve compatibility with MSSQL backend""" conn = op.get_bind() if conn.dialect.name != "mssql": return recreate_mssql_ts_column(conn, op, "dag_code", "last_updated") recreate_mssql_ts_column(conn, op, "rendered_task_instance_fields", "execution_date") alter_mssql_datetime_column(conn, op, "serialized_dag", "last_updated", False) op.alter_column(table_name="xcom", column_name="timestamp", type_=TIMESTAMP, nullable=False) with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op: task_reschedule_batch_op.alter_column(column_name="end_date", type_=TIMESTAMP, nullable=False) task_reschedule_batch_op.alter_column(column_name="reschedule_date", type_=TIMESTAMP, nullable=False) task_reschedule_batch_op.alter_column(column_name="start_date", type_=TIMESTAMP, nullable=False) with op.batch_alter_table("task_fail") as task_fail_batch_op: task_fail_batch_op.drop_index("idx_task_fail_dag_task_date") task_fail_batch_op.alter_column(column_name="execution_date", type_=TIMESTAMP, nullable=False) task_fail_batch_op.create_index( "idx_task_fail_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False ) with op.batch_alter_table("task_instance") as task_instance_batch_op: task_instance_batch_op.drop_index("ti_state_lkp") task_instance_batch_op.create_index( "ti_state_lkp", ["dag_id", "task_id", "execution_date", "state"], unique=False ) constraint_dict = get_table_constraints(conn, "dag_run") for constraint, columns in constraint_dict.items(): if "dag_id" in columns: if constraint[1].lower().startswith("unique"): op.drop_constraint(constraint[0], "dag_run", type_="unique") # create filtered indexes conn.execute( text( """CREATE UNIQUE NONCLUSTERED INDEX idx_not_null_dag_id_execution_date ON dag_run(dag_id,execution_date) WHERE dag_id IS NOT NULL and execution_date is not null""" ) ) conn.execute( text( """CREATE UNIQUE NONCLUSTERED INDEX idx_not_null_dag_id_run_id ON dag_run(dag_id,run_id) WHERE dag_id IS NOT NULL and run_id is not null""" ) )
Reverse MSSQL backend compatibility improvements
def downgrade(): """Reverse MSSQL backend compatibility improvements""" conn = op.get_bind() if conn.dialect.name != "mssql": return op.alter_column(table_name="xcom", column_name="timestamp", type_=TIMESTAMP, nullable=True) with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op: task_reschedule_batch_op.alter_column(column_name="end_date", type_=TIMESTAMP, nullable=True) task_reschedule_batch_op.alter_column(column_name="reschedule_date", type_=TIMESTAMP, nullable=True) task_reschedule_batch_op.alter_column(column_name="start_date", type_=TIMESTAMP, nullable=True) with op.batch_alter_table("task_fail") as task_fail_batch_op: task_fail_batch_op.drop_index("idx_task_fail_dag_task_date") task_fail_batch_op.alter_column(column_name="execution_date", type_=TIMESTAMP, nullable=False) task_fail_batch_op.create_index( "idx_task_fail_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False ) with op.batch_alter_table("task_instance") as task_instance_batch_op: task_instance_batch_op.drop_index("ti_state_lkp") task_instance_batch_op.create_index( "ti_state_lkp", ["dag_id", "task_id", "execution_date"], unique=False ) op.create_unique_constraint("UQ__dag_run__dag_id_run_id", "dag_run", ["dag_id", "run_id"]) op.create_unique_constraint("UQ__dag_run__dag_id_execution_date", "dag_run", ["dag_id", "execution_date"]) op.drop_index("idx_not_null_dag_id_execution_date", table_name="dag_run") op.drop_index("idx_not_null_dag_id_run_id", table_name="dag_run")
Apply Make XCom primary key columns non-nullable
def upgrade(): """Apply Make XCom primary key columns non-nullable""" conn = op.get_bind() with op.batch_alter_table("xcom") as bop: bop.alter_column("key", type_=StringID(length=512), nullable=False) bop.alter_column("execution_date", type_=TIMESTAMP, nullable=False) if conn.dialect.name == "mssql": bop.create_primary_key( constraint_name="pk_xcom", columns=["dag_id", "task_id", "key", "execution_date"] )
Unapply Make XCom primary key columns non-nullable
def downgrade(): """Unapply Make XCom primary key columns non-nullable""" conn = op.get_bind() with op.batch_alter_table("xcom") as bop: # regardless of what the model defined, the `key` and `execution_date` # columns were always non-nullable for mysql, sqlite and postgres, so leave them alone if conn.dialect.name == "mssql": bop.drop_constraint("pk_xcom", type_="primary") # execution_date and key wasn't nullable in the other databases bop.alter_column("key", type_=StringID(length=512), nullable=True) bop.alter_column("execution_date", type_=TIMESTAMP, nullable=True)
Apply Rename ``concurrency`` column in ``dag`` table to ``max_active_tasks``
def upgrade():
    """Apply Rename ``concurrency`` column in ``dag`` table to ``max_active_tasks``"""
    conn = op.get_bind()
    is_sqlite = bool(conn.dialect.name == "sqlite")
    if is_sqlite:
        op.execute("PRAGMA foreign_keys=off")
    with op.batch_alter_table("dag") as batch_op:
        batch_op.alter_column(
            "concurrency",
            new_column_name="max_active_tasks",
            type_=sa.Integer(),
            nullable=False,
        )
    if is_sqlite:
        op.execute("PRAGMA foreign_keys=on")
Unapply Rename ``concurrency`` column in ``dag`` table to ``max_active_tasks``
def downgrade():
    """Unapply Rename ``concurrency`` column in ``dag`` table to ``max_active_tasks``"""
    with op.batch_alter_table("dag") as batch_op:
        batch_op.alter_column(
            "max_active_tasks",
            new_column_name="concurrency",
            type_=sa.Integer(),
            nullable=False,
        )
Apply Adds ``trigger`` table and deferrable operator columns to task instance
def upgrade(): """Apply Adds ``trigger`` table and deferrable operator columns to task instance""" op.create_table( "trigger", sa.Column("id", sa.Integer(), primary_key=True, nullable=False), sa.Column("classpath", sa.String(length=1000), nullable=False), sa.Column("kwargs", ExtendedJSON(), nullable=False), sa.Column("created_date", sa.DateTime(), nullable=False), sa.Column("triggerer_id", sa.Integer(), nullable=True), ) with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.add_column(sa.Column("trigger_id", sa.Integer())) batch_op.add_column(sa.Column("trigger_timeout", sa.DateTime())) batch_op.add_column(sa.Column("next_method", sa.String(length=1000))) batch_op.add_column(sa.Column("next_kwargs", ExtendedJSON())) batch_op.create_foreign_key( "task_instance_trigger_id_fkey", "trigger", ["trigger_id"], ["id"], ondelete="CASCADE" ) batch_op.create_index("ti_trigger_id", ["trigger_id"])
Unapply Adds ``trigger`` table and deferrable operator columns to task instance
def downgrade(): """Unapply Adds ``trigger`` table and deferrable operator columns to task instance""" with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.drop_constraint("task_instance_trigger_id_fkey", type_="foreignkey") batch_op.drop_index("ti_trigger_id") batch_op.drop_column("trigger_id") batch_op.drop_column("trigger_timeout") batch_op.drop_column("next_method") batch_op.drop_column("next_kwargs") op.drop_table("trigger")
Apply data_interval fields to DagModel and DagRun.
def upgrade(): """Apply data_interval fields to DagModel and DagRun.""" with op.batch_alter_table("dag_run") as batch_op: batch_op.add_column(Column("data_interval_start", TIMESTAMP)) batch_op.add_column(Column("data_interval_end", TIMESTAMP)) with op.batch_alter_table("dag") as batch_op: batch_op.add_column(Column("next_dagrun_data_interval_start", TIMESTAMP)) batch_op.add_column(Column("next_dagrun_data_interval_end", TIMESTAMP))
Unapply data_interval fields to DagModel and DagRun.
def downgrade(): """Unapply data_interval fields to DagModel and DagRun.""" with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_column("data_interval_start") batch_op.drop_column("data_interval_end") with op.batch_alter_table("dag") as batch_op: batch_op.drop_column("next_dagrun_data_interval_start") batch_op.drop_column("next_dagrun_data_interval_end")
Apply Change ``TaskInstance`` and ``TaskReschedule`` tables from execution_date to run_id.
def upgrade(): """Apply Change ``TaskInstance`` and ``TaskReschedule`` tables from execution_date to run_id.""" conn = op.get_bind() dialect_name = conn.dialect.name dt_type = TIMESTAMP string_id_col_type = StringID() if dialect_name == "sqlite": naming_convention = { "uq": "%(table_name)s_%(column_0_N_name)s_key", } # The naming_convention force the previously un-named UNIQUE constraints to have the right name with op.batch_alter_table( "dag_run", naming_convention=naming_convention, recreate="always" ) as batch_op: batch_op.alter_column("dag_id", existing_type=string_id_col_type, nullable=False) batch_op.alter_column("run_id", existing_type=string_id_col_type, nullable=False) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=False) elif dialect_name == "mysql": with op.batch_alter_table("dag_run") as batch_op: batch_op.alter_column( "dag_id", existing_type=sa.String(length=ID_LEN), type_=string_id_col_type, nullable=False ) batch_op.alter_column( "run_id", existing_type=sa.String(length=ID_LEN), type_=string_id_col_type, nullable=False ) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=False) inspector = sa.inspect(conn.engine) unique_keys = inspector.get_unique_constraints("dag_run") for unique_key in unique_keys: batch_op.drop_constraint(unique_key["name"], type_="unique") batch_op.create_unique_constraint( "dag_run_dag_id_execution_date_key", ["dag_id", "execution_date"] ) batch_op.create_unique_constraint("dag_run_dag_id_run_id_key", ["dag_id", "run_id"]) elif dialect_name == "mssql": with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_index("idx_not_null_dag_id_execution_date") batch_op.drop_index("idx_not_null_dag_id_run_id") batch_op.drop_index("dag_id_state") batch_op.drop_index("idx_dag_run_dag_id") batch_op.drop_index("idx_dag_run_running_dags") batch_op.drop_index("idx_dag_run_queued_dags") batch_op.alter_column("dag_id", existing_type=string_id_col_type, nullable=False) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=False) batch_op.alter_column("run_id", existing_type=string_id_col_type, nullable=False) # _Somehow_ mssql was missing these constraints entirely batch_op.create_unique_constraint( "dag_run_dag_id_execution_date_key", ["dag_id", "execution_date"] ) batch_op.create_unique_constraint("dag_run_dag_id_run_id_key", ["dag_id", "run_id"]) batch_op.create_index("dag_id_state", ["dag_id", "state"], unique=False) batch_op.create_index("idx_dag_run_dag_id", ["dag_id"]) batch_op.create_index( "idx_dag_run_running_dags", ["state", "dag_id"], mssql_where=sa.text("state='running'"), ) batch_op.create_index( "idx_dag_run_queued_dags", ["state", "dag_id"], mssql_where=sa.text("state='queued'"), ) else: # Make sure DagRun PK columns are non-nullable with op.batch_alter_table("dag_run", schema=None) as batch_op: batch_op.alter_column("dag_id", existing_type=string_id_col_type, nullable=False) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=False) batch_op.alter_column("run_id", existing_type=string_id_col_type, nullable=False) # First create column nullable op.add_column("task_instance", sa.Column("run_id", type_=string_id_col_type, nullable=True)) op.add_column("task_reschedule", sa.Column("run_id", type_=string_id_col_type, nullable=True)) # # TaskReschedule has a FK to TaskInstance, so we have to update that before # we can drop the TI.execution_date column update_query = _multi_table_update(dialect_name, task_reschedule, task_reschedule.c.run_id) op.execute(update_query) 
with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.alter_column( "run_id", existing_type=string_id_col_type, existing_nullable=True, nullable=False ) batch_op.drop_constraint("task_reschedule_dag_task_date_fkey", type_="foreignkey") if dialect_name == "mysql": # Mysql creates an index and a constraint -- we have to drop both batch_op.drop_index("task_reschedule_dag_task_date_fkey") batch_op.alter_column( "dag_id", existing_type=sa.String(length=ID_LEN), type_=string_id_col_type, nullable=False ) batch_op.drop_index("idx_task_reschedule_dag_task_date") # Then update the new column by selecting the right value from DagRun # But first we will drop and recreate indexes to make it faster if dialect_name == "postgresql": # Recreate task_instance, without execution_date and with dagrun.run_id op.execute( """ CREATE TABLE new_task_instance AS SELECT ti.task_id, ti.dag_id, dag_run.run_id, ti.start_date, ti.end_date, ti.duration, ti.state, ti.try_number, ti.hostname, ti.unixname, ti.job_id, ti.pool, ti.queue, ti.priority_weight, ti.operator, ti.queued_dttm, ti.pid, ti.max_tries, ti.executor_config, ti.pool_slots, ti.queued_by_job_id, ti.external_executor_id, ti.trigger_id, ti.trigger_timeout, ti.next_method, ti.next_kwargs FROM task_instance ti INNER JOIN dag_run ON dag_run.dag_id = ti.dag_id AND dag_run.execution_date = ti.execution_date; """ ) op.drop_table("task_instance") op.rename_table("new_task_instance", "task_instance") # Fix up columns after the 'create table as select' with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column( "pool", existing_type=string_id_col_type, existing_nullable=True, nullable=False ) batch_op.alter_column("max_tries", existing_type=sa.Integer(), server_default="-1") batch_op.alter_column( "pool_slots", existing_type=sa.Integer(), existing_nullable=True, nullable=False ) else: update_query = _multi_table_update(dialect_name, task_instance, task_instance.c.run_id) op.execute(update_query) with op.batch_alter_table("task_instance", schema=None) as batch_op: if dialect_name != "postgresql": # TODO: Is this right for non-postgres? 
if dialect_name == "mssql": constraints = get_mssql_table_constraints(conn, "task_instance") pk, _ = constraints["PRIMARY KEY"].popitem() batch_op.drop_constraint(pk, type_="primary") elif dialect_name not in ("sqlite"): batch_op.drop_constraint("task_instance_pkey", type_="primary") batch_op.drop_index("ti_dag_date") batch_op.drop_index("ti_state_lkp") batch_op.drop_column("execution_date") # Then make it non-nullable batch_op.alter_column( "run_id", existing_type=string_id_col_type, existing_nullable=True, nullable=False ) batch_op.alter_column( "dag_id", existing_type=string_id_col_type, existing_nullable=True, nullable=False ) batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id"]) batch_op.create_foreign_key( "task_instance_dag_run_fkey", "dag_run", ["dag_id", "run_id"], ["dag_id", "run_id"], ondelete="CASCADE", ) batch_op.create_index("ti_dag_run", ["dag_id", "run_id"]) batch_op.create_index("ti_state_lkp", ["dag_id", "task_id", "run_id", "state"]) if dialect_name == "postgresql": batch_op.create_index("ti_dag_state", ["dag_id", "state"]) batch_op.create_index("ti_job_id", ["job_id"]) batch_op.create_index("ti_pool", ["pool", "state", "priority_weight"]) batch_op.create_index("ti_state", ["state"]) batch_op.create_foreign_key( "task_instance_trigger_id_fkey", "trigger", ["trigger_id"], ["id"], ondelete="CASCADE" ) batch_op.create_index("ti_trigger_id", ["trigger_id"]) with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.drop_column("execution_date") batch_op.create_index( "idx_task_reschedule_dag_task_run", ["dag_id", "task_id", "run_id"], unique=False, ) # _Now_ there is a unique constraint on the columns in TI we can re-create the FK from TaskReschedule batch_op.create_foreign_key( "task_reschedule_ti_fkey", "task_instance", ["dag_id", "task_id", "run_id"], ["dag_id", "task_id", "run_id"], ondelete="CASCADE", ) # https://docs.microsoft.com/en-us/sql/relational-databases/errors-events/mssqlserver-1785-database-engine-error?view=sql-server-ver15 ondelete = "CASCADE" if dialect_name != "mssql" else "NO ACTION" batch_op.create_foreign_key( "task_reschedule_dr_fkey", "dag_run", ["dag_id", "run_id"], ["dag_id", "run_id"], ondelete=ondelete, )
Unapply Change ``TaskInstance`` and ``TaskReschedule`` tables from execution_date to run_id.
def downgrade(): """Unapply Change ``TaskInstance`` and ``TaskReschedule`` tables from execution_date to run_id.""" dialect_name = op.get_bind().dialect.name dt_type = TIMESTAMP string_id_col_type = StringID() op.add_column("task_instance", sa.Column("execution_date", dt_type, nullable=True)) op.add_column("task_reschedule", sa.Column("execution_date", dt_type, nullable=True)) update_query = _multi_table_update(dialect_name, task_instance, task_instance.c.execution_date) op.execute(update_query) update_query = _multi_table_update(dialect_name, task_reschedule, task_reschedule.c.execution_date) op.execute(update_query) with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.alter_column("execution_date", existing_type=dt_type, existing_nullable=True, nullable=False) # Can't drop PK index while there is a FK referencing it batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey") batch_op.drop_constraint("task_reschedule_dr_fkey", type_="foreignkey") batch_op.drop_index("idx_task_reschedule_dag_task_run") with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.drop_constraint("task_instance_pkey", type_="primary") batch_op.alter_column("execution_date", existing_type=dt_type, existing_nullable=True, nullable=False) if dialect_name != "mssql": batch_op.alter_column( "dag_id", existing_type=string_id_col_type, existing_nullable=False, nullable=True ) batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "execution_date"]) batch_op.drop_constraint("task_instance_dag_run_fkey", type_="foreignkey") batch_op.drop_index("ti_dag_run") batch_op.drop_index("ti_state_lkp") batch_op.create_index("ti_state_lkp", ["dag_id", "task_id", "execution_date", "state"]) batch_op.create_index("ti_dag_date", ["dag_id", "execution_date"], unique=False) batch_op.drop_column("run_id") with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.drop_column("run_id") batch_op.create_index( "idx_task_reschedule_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False, ) # Can only create FK once there is an index on these columns batch_op.create_foreign_key( "task_reschedule_dag_task_date_fkey", "task_instance", ["dag_id", "task_id", "execution_date"], ["dag_id", "task_id", "execution_date"], ondelete="CASCADE", ) if dialect_name == "mysql": batch_op.create_index( "task_reschedule_dag_task_date_fkey", ["dag_id", "execution_date"], unique=False ) if dialect_name == "mssql": with op.batch_alter_table("dag_run", schema=None) as batch_op: batch_op.drop_constraint("dag_run_dag_id_execution_date_key", type_="unique") batch_op.drop_constraint("dag_run_dag_id_run_id_key", type_="unique") batch_op.drop_index("dag_id_state") batch_op.drop_index("idx_dag_run_running_dags") batch_op.drop_index("idx_dag_run_queued_dags") batch_op.drop_index("idx_dag_run_dag_id") batch_op.alter_column("dag_id", existing_type=string_id_col_type, nullable=True) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=True) batch_op.alter_column("run_id", existing_type=string_id_col_type, nullable=True) batch_op.create_index("dag_id_state", ["dag_id", "state"], unique=False) batch_op.create_index("idx_dag_run_dag_id", ["dag_id"]) batch_op.create_index( "idx_dag_run_running_dags", ["state", "dag_id"], mssql_where=sa.text("state='running'"), ) batch_op.create_index( "idx_dag_run_queued_dags", ["state", "dag_id"], mssql_where=sa.text("state='queued'"), ) op.execute( """CREATE UNIQUE NONCLUSTERED INDEX 
idx_not_null_dag_id_execution_date ON dag_run(dag_id,execution_date) WHERE dag_id IS NOT NULL and execution_date is not null""" ) op.execute( """CREATE UNIQUE NONCLUSTERED INDEX idx_not_null_dag_id_run_id ON dag_run(dag_id,run_id) WHERE dag_id IS NOT NULL and run_id is not null""" ) else: with op.batch_alter_table("dag_run", schema=None) as batch_op: batch_op.drop_index("dag_id_state") batch_op.alter_column("run_id", existing_type=sa.VARCHAR(length=250), nullable=True) batch_op.alter_column("execution_date", existing_type=dt_type, nullable=True) batch_op.alter_column("dag_id", existing_type=sa.VARCHAR(length=250), nullable=True) batch_op.create_index("dag_id_state", ["dag_id", "state"], unique=False)
Apply Add has_import_errors column to DagModel
def upgrade(): """Apply Add has_import_errors column to DagModel""" op.add_column("dag", sa.Column("has_import_errors", sa.Boolean(), server_default="0"))
Unapply Add has_import_errors column to DagModel
def downgrade(): """Unapply Add has_import_errors column to DagModel""" with op.batch_alter_table("dag") as batch_op: batch_op.drop_column("has_import_errors", mssql_drop_default=True)
Apply Create a ``session`` table to store web session data
def upgrade(): """Apply Create a ``session`` table to store web session data""" op.create_table( TABLE_NAME, sa.Column("id", sa.Integer()), sa.Column("session_id", sa.String(255)), sa.Column("data", sa.LargeBinary()), sa.Column("expiry", sa.DateTime()), sa.PrimaryKeyConstraint("id"), sa.UniqueConstraint("session_id"), )
Unapply Create a ``session`` table to store web session data
def downgrade(): """Unapply Create a ``session`` table to store web session data""" op.drop_table(TABLE_NAME)
Apply Add index for ``dag_id`` column in ``job`` table.
def upgrade(): """Apply Add index for ``dag_id`` column in ``job`` table.""" op.create_index("idx_job_dag_id", "job", ["dag_id"], unique=False)
Unapply Add index for ``dag_id`` column in ``job`` table.
def downgrade(): """Unapply Add index for ``dag_id`` column in ``job`` table.""" op.drop_index("idx_job_dag_id", table_name="job")
Increase length of email from 64 to 256 characters
def upgrade(): """Increase length of email from 64 to 256 characters""" with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column("username", type_=sa.String(256)) batch_op.alter_column("email", type_=sa.String(256)) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column("username", type_=sa.String(256)) batch_op.alter_column("email", type_=sa.String(256))
Revert length of email from 256 to 64 characters
def downgrade(): """Revert length of email from 256 to 64 characters""" conn = op.get_bind() if conn.dialect.name != "mssql": with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column("username", type_=sa.String(64), nullable=False) batch_op.alter_column("email", type_=sa.String(64)) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column("username", type_=sa.String(64)) batch_op.alter_column("email", type_=sa.String(64)) else: # MSSQL doesn't drop implicit unique constraints it created # We need to drop the two unique constraints explicitly with op.batch_alter_table("ab_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_user") unique_key, _ = constraints["UNIQUE"].popitem() batch_op.drop_constraint(unique_key, type_="unique") unique_key, _ = constraints["UNIQUE"].popitem() batch_op.drop_constraint(unique_key, type_="unique") batch_op.alter_column("username", type_=sa.String(64), nullable=False) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", type_=sa.String(64)) batch_op.create_unique_constraint(None, ["email"]) with op.batch_alter_table("ab_register_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_register_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", type_=sa.String(64)) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", type_=sa.String(64))
Apply Add ``timetable_description`` column to DagModel for UI.
def upgrade(): """Apply Add ``timetable_description`` column to DagModel for UI.""" with op.batch_alter_table("dag", schema=None) as batch_op: batch_op.add_column(sa.Column("timetable_description", sa.String(length=1000), nullable=True))
Unapply Add ``timetable_description`` column to DagModel for UI.
def downgrade(): """Unapply Add ``timetable_description`` column to DagModel for UI.""" is_sqlite = bool(op.get_bind().dialect.name == "sqlite") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table("dag") as batch_op: batch_op.drop_column("timetable_description") if is_sqlite: op.execute("PRAGMA foreign_keys=on")
Add model for task log template and establish fk on task instance.
def upgrade(): """Add model for task log template and establish fk on task instance.""" op.create_table( "log_template", Column("id", Integer, primary_key=True, autoincrement=True), Column("filename", Text, nullable=False), Column("elasticsearch_id", Text, nullable=False), Column("created_at", UtcDateTime, nullable=False), ) dag_run_log_filename_id = Column( "log_template_id", Integer, ForeignKey("log_template.id", name="task_instance_log_template_id_fkey", ondelete="NO ACTION"), ) with disable_sqlite_fkeys(op), op.batch_alter_table("dag_run") as batch_op: batch_op.add_column(dag_run_log_filename_id)
Remove fk on task instance and model for task log filename template.
def downgrade(): """Remove fk on task instance and model for task log filename template.""" with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_constraint("task_instance_log_template_id_fkey", type_="foreignkey") batch_op.drop_column("log_template_id") op.drop_table("log_template")
Add ``map_index`` column to TaskInstance to identify task-mapping, and a ``task_map`` table to track mapping values from XCom.
def upgrade(): """ Add ``map_index`` column to TaskInstance to identify task-mapping, and a ``task_map`` table to track mapping values from XCom. """ # We need to first remove constraints on task_reschedule since they depend on task_instance. with op.batch_alter_table("task_reschedule") as batch_op: batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey") batch_op.drop_index("idx_task_reschedule_dag_task_run") # Change task_instance's primary key. with op.batch_alter_table("task_instance") as batch_op: # I think we always use this name for TaskInstance after 7b2661a43ba3? batch_op.drop_constraint("task_instance_pkey", type_="primary") batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1"))) batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id", "map_index"]) # Re-create task_reschedule's constraints. with op.batch_alter_table("task_reschedule") as batch_op: batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1"))) batch_op.create_foreign_key( "task_reschedule_ti_fkey", "task_instance", ["dag_id", "task_id", "run_id", "map_index"], ["dag_id", "task_id", "run_id", "map_index"], ondelete="CASCADE", ) batch_op.create_index( "idx_task_reschedule_dag_task_run", ["dag_id", "task_id", "run_id", "map_index"], unique=False, ) # Create task_map. op.create_table( "task_map", Column("dag_id", StringID(), primary_key=True), Column("task_id", StringID(), primary_key=True), Column("run_id", StringID(), primary_key=True), Column("map_index", Integer, primary_key=True), Column("length", Integer, nullable=False), Column("keys", ExtendedJSON, nullable=True), CheckConstraint("length >= 0", name="task_map_length_not_negative"), ForeignKeyConstraint( ["dag_id", "task_id", "run_id", "map_index"], [ "task_instance.dag_id", "task_instance.task_id", "task_instance.run_id", "task_instance.map_index", ], name="task_map_task_instance_fkey", ondelete="CASCADE", ), )
Remove TaskMap and map_index on TaskInstance.
def downgrade(): """Remove TaskMap and map_index on TaskInstance.""" op.drop_table("task_map") with op.batch_alter_table("task_reschedule") as batch_op: batch_op.drop_constraint("task_reschedule_ti_fkey", type_="foreignkey") batch_op.drop_index("idx_task_reschedule_dag_task_run") batch_op.drop_column("map_index", mssql_drop_default=True) op.execute("DELETE FROM task_instance WHERE map_index != -1") with op.batch_alter_table("task_instance") as batch_op: batch_op.drop_constraint("task_instance_pkey", type_="primary") batch_op.drop_column("map_index", mssql_drop_default=True) batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id"]) with op.batch_alter_table("task_reschedule") as batch_op: batch_op.create_foreign_key( "task_reschedule_ti_fkey", "task_instance", ["dag_id", "task_id", "run_id"], ["dag_id", "task_id", "run_id"], ondelete="CASCADE", ) batch_op.create_index( "idx_task_reschedule_dag_task_run", ["dag_id", "task_id", "run_id"], unique=False, )
Switch XCom table to use run_id. For performance reasons, this is done by creating a new table with needed data pre-populated, adding back constraints we need, and renaming it to replace the existing XCom table.
def upgrade(): """Switch XCom table to use run_id. For performance reasons, this is done by creating a new table with needed data pre-populated, adding back constraints we need, and renaming it to replace the existing XCom table. """ conn = op.get_bind() is_sqlite = conn.dialect.name == "sqlite" op.create_table("__airflow_tmp_xcom", *_get_new_xcom_columns()) xcom = Table("xcom", metadata, *_get_old_xcom_columns()) dagrun = _get_dagrun_table() query = select( [ dagrun.c.id, xcom.c.task_id, xcom.c.key, xcom.c.value, xcom.c.timestamp, xcom.c.dag_id, dagrun.c.run_id, literal_column("-1"), ], ).select_from( xcom.join( right=dagrun, onclause=and_( xcom.c.dag_id == dagrun.c.dag_id, xcom.c.execution_date == dagrun.c.execution_date, ), ), ) op.execute(f"INSERT INTO __airflow_tmp_xcom {query.selectable.compile(op.get_bind())}") if is_sqlite: op.execute("PRAGMA foreign_keys=off") op.drop_table("xcom") if is_sqlite: op.execute("PRAGMA foreign_keys=on") op.rename_table("__airflow_tmp_xcom", "xcom") with op.batch_alter_table("xcom") as batch_op: batch_op.create_primary_key("xcom_pkey", ["dag_run_id", "task_id", "map_index", "key"]) batch_op.create_index("idx_xcom_key", ["key"]) batch_op.create_foreign_key( "xcom_task_instance_fkey", "task_instance", ["dag_id", "task_id", "run_id", "map_index"], ["dag_id", "task_id", "run_id", "map_index"], ondelete="CASCADE", )
Switch XCom table back to use execution_date. Basically an inverse operation.
def downgrade(): """Switch XCom table back to use execution_date. Basically an inverse operation. """ conn = op.get_bind() op.create_table("__airflow_tmp_xcom", *_get_old_xcom_columns()) xcom = Table("xcom", metadata, *_get_new_xcom_columns()) # Remoe XCom entries from mapped tis. op.execute(xcom.delete().where(xcom.c.map_index != -1)) dagrun = _get_dagrun_table() query = select( [ xcom.c.key, xcom.c.value, xcom.c.timestamp, xcom.c.task_id, xcom.c.dag_id, dagrun.c.execution_date, ], ).select_from( xcom.join( right=dagrun, onclause=and_( xcom.c.dag_id == dagrun.c.dag_id, xcom.c.run_id == dagrun.c.run_id, ), ), ) op.execute(f"INSERT INTO __airflow_tmp_xcom {query.selectable.compile(op.get_bind())}") op.drop_table("xcom") op.rename_table("__airflow_tmp_xcom", "xcom") if conn.dialect.name == "mssql": constraints = get_mssql_table_constraints(conn, "xcom") pk, _ = constraints["PRIMARY KEY"].popitem() op.drop_constraint(pk, "xcom", type_="primary") op.create_primary_key( constraint_name="pk_xcom", table_name="xcom", columns=["dag_id", "task_id", "execution_date", "key"], )
Grabs a value from the source table ``dag_run`` and updates target with this value. :param dialect_name: dialect in use :param target_table: the table to update :param target_column: the column to update
def _update_value_from_dag_run(
    dialect_name: str,
    target_table: sa.Table,
    target_column: ColumnElement,
    join_columns: list[str],
) -> Update:
    """
    Grabs a value from the source table ``dag_run`` and updates target with this value.

    :param dialect_name: dialect in use
    :param target_table: the table to update
    :param target_column: the column to update
    """
    # for run_id: dag_id, execution_date
    # otherwise: dag_id, run_id
    condition_list = [getattr(dag_run.c, x) == getattr(target_table.c, x) for x in join_columns]
    condition = and_(*condition_list)
    if dialect_name == "sqlite":
        # Most SQLite versions don't support multi table update (and SQLA doesn't know about it anyway),
        # so we need to do a Correlated subquery update
        sub_q = select(dag_run.c[target_column.name]).where(condition)
        return target_table.update().values({target_column: sub_q})
    else:
        return target_table.update().where(condition).values({target_column: dag_run.c[target_column.name]})
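As a hedged usage sketch (the table objects and join columns are assumptions about how the run_id migration above would drive this helper), a caller would do roughly:
# Hypothetical call site: backfill task_instance.run_id by joining dag_run
# on dag_id + execution_date, then let Alembic execute the returned UPDATE.
update_query = _update_value_from_dag_run(
    dialect_name=op.get_bind().dialect.name,
    target_table=task_instance,
    target_column=task_instance.c.run_id,
    join_columns=["dag_id", "execution_date"],
)
op.execute(update_query)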
Apply Update migration for FAB tables to add missing constraints
def upgrade(): """Apply Update migration for FAB tables to add missing constraints""" conn = op.get_bind() if conn.dialect.name == "sqlite": op.execute("PRAGMA foreign_keys=OFF") with op.batch_alter_table("ab_view_menu", schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f("ab_view_menu_name_uq"), ["name"]) op.execute("PRAGMA foreign_keys=ON") elif conn.dialect.name == "mysql": with op.batch_alter_table("ab_register_user", schema=None) as batch_op: batch_op.alter_column("username", existing_type=sa.String(256), nullable=False) batch_op.alter_column("email", existing_type=sa.String(256), nullable=False) with op.batch_alter_table("ab_user", schema=None) as batch_op: batch_op.alter_column("username", existing_type=sa.String(256), nullable=False) batch_op.alter_column("email", existing_type=sa.String(256), nullable=False) elif conn.dialect.name == "mssql": with op.batch_alter_table("ab_register_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_register_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", existing_type=sa.String(256), nullable=False) with op.batch_alter_table("ab_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ["email"])
Unapply Update migration for FAB tables to add missing constraints
def downgrade(): """Unapply Update migration for FAB tables to add missing constraints""" conn = op.get_bind() if conn.dialect.name == "sqlite": op.execute("PRAGMA foreign_keys=OFF") with op.batch_alter_table("ab_view_menu", schema=None) as batch_op: batch_op.drop_constraint("ab_view_menu_name_uq", type_="unique") op.execute("PRAGMA foreign_keys=ON") elif conn.dialect.name == "mysql": with op.batch_alter_table("ab_user", schema=None) as batch_op: batch_op.alter_column("email", existing_type=sa.String(256), nullable=True) batch_op.alter_column("username", existing_type=sa.String(256), nullable=True, unique=True) with op.batch_alter_table("ab_register_user", schema=None) as batch_op: batch_op.alter_column("email", existing_type=sa.String(256), nullable=True) batch_op.alter_column("username", existing_type=sa.String(256), nullable=True, unique=True) elif conn.dialect.name == "mssql": with op.batch_alter_table("ab_register_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_register_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", existing_type=sa.String(256), nullable=False, unique=True) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", existing_type=sa.String(256), nullable=False, unique=True) with op.batch_alter_table("ab_user") as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, "ab_user") for k, _ in constraints.get("UNIQUE").items(): batch_op.drop_constraint(k, type_="unique") batch_op.alter_column("username", existing_type=sa.String(256), nullable=True) batch_op.create_unique_constraint(None, ["username"]) batch_op.alter_column("email", existing_type=sa.String(256), nullable=True, unique=True) batch_op.create_unique_constraint(None, ["email"])
Add map_index to Log.
def upgrade(): """Add map_index to Log.""" op.add_column("log", Column("map_index", Integer))
Remove map_index from Log.
def downgrade(): """Remove map_index from Log.""" with op.batch_alter_table("log") as batch_op: batch_op.drop_column("map_index")
Apply Add index for ``event`` column in ``log`` table.
def upgrade(): """Apply Add index for ``event`` column in ``log`` table.""" op.create_index("idx_log_event", "log", ["event"], unique=False)
Unapply Add index for ``event`` column in ``log`` table.
def downgrade(): """Unapply Add index for ``event`` column in ``log`` table.""" op.drop_index("idx_log_event", table_name="log")
Apply Add cascade to dag_tag foreignkey
def upgrade(): """Apply Add cascade to dag_tag foreignkey""" conn = op.get_bind() if conn.dialect.name in ["sqlite", "mysql"]: inspector = inspect(conn.engine) foreignkey = inspector.get_foreign_keys("dag_tag") with op.batch_alter_table( "dag_tag", ) as batch_op: batch_op.drop_constraint(foreignkey[0]["name"], type_="foreignkey") batch_op.create_foreign_key( "dag_tag_dag_id_fkey", "dag", ["dag_id"], ["dag_id"], ondelete="CASCADE" ) else: with op.batch_alter_table("dag_tag") as batch_op: if conn.dialect.name == "mssql": constraints = get_mssql_table_constraints(conn, "dag_tag") Fk, _ = constraints["FOREIGN KEY"].popitem() batch_op.drop_constraint(Fk, type_="foreignkey") if conn.dialect.name == "postgresql": batch_op.drop_constraint("dag_tag_dag_id_fkey", type_="foreignkey") batch_op.create_foreign_key( "dag_tag_dag_id_fkey", "dag", ["dag_id"], ["dag_id"], ondelete="CASCADE" )
Unapply Add cascade to dag_tag foreignkey
def downgrade(): """Unapply Add cascade to dag_tag foreignkey""" conn = op.get_bind() if conn.dialect.name == "sqlite": with op.batch_alter_table("dag_tag") as batch_op: batch_op.drop_constraint("dag_tag_dag_id_fkey", type_="foreignkey") batch_op.create_foreign_key("fk_dag_tag_dag_id_dag", "dag", ["dag_id"], ["dag_id"]) else: with op.batch_alter_table("dag_tag") as batch_op: batch_op.drop_constraint("dag_tag_dag_id_fkey", type_="foreignkey") batch_op.create_foreign_key( None, "dag", ["dag_id"], ["dag_id"], )
If user downgraded and is upgrading again, we have to check for existing indexes on mysql because we can't (and don't) drop them as part of the downgrade.
def _mysql_tables_where_indexes_already_present(conn):
    """
    If user downgraded and is upgrading again, we have to check for existing
    indexes on mysql because we can't (and don't) drop them as part of the downgrade.
    """
    to_check = [
        ("xcom", "idx_xcom_task_instance"),
        ("task_reschedule", "idx_task_reschedule_dag_run"),
        ("task_fail", "idx_task_fail_task_instance"),
    ]
    tables = set()
    for tbl, idx in to_check:
        if conn.execute(text(f"show indexes from {tbl} where Key_name = '{idx}'")).first():
            tables.add(tbl)
    return tables
Apply Add indexes for CASCADE deletes
def upgrade(): """Apply Add indexes for CASCADE deletes""" conn = op.get_bind() tables_to_skip = set() # mysql requires indexes for FKs, so adding had the effect of renaming, and we cannot remove. if conn.dialect.name == "mysql" and not context.is_offline_mode(): tables_to_skip.update(_mysql_tables_where_indexes_already_present(conn)) if "task_fail" not in tables_to_skip: with op.batch_alter_table("task_fail", schema=None) as batch_op: batch_op.create_index("idx_task_fail_task_instance", ["dag_id", "task_id", "run_id", "map_index"]) if "task_reschedule" not in tables_to_skip: with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.create_index("idx_task_reschedule_dag_run", ["dag_id", "run_id"]) if "xcom" not in tables_to_skip: with op.batch_alter_table("xcom", schema=None) as batch_op: batch_op.create_index("idx_xcom_task_instance", ["dag_id", "task_id", "run_id", "map_index"])
Unapply Add indexes for CASCADE deletes
def downgrade(): """Unapply Add indexes for CASCADE deletes""" conn = op.get_bind() # mysql requires indexes for FKs, so adding had the effect of renaming, and we cannot remove. if conn.dialect.name == "mysql": return with op.batch_alter_table("xcom", schema=None) as batch_op: batch_op.drop_index("idx_xcom_task_instance") with op.batch_alter_table("task_reschedule", schema=None) as batch_op: batch_op.drop_index("idx_task_reschedule_dag_run") with op.batch_alter_table("task_fail", schema=None) as batch_op: batch_op.drop_index("idx_task_fail_task_instance")
Apply Add DagWarning model
def upgrade(): """Apply Add DagWarning model""" op.create_table( "dag_warning", sa.Column("dag_id", StringID(), primary_key=True), sa.Column("warning_type", sa.String(length=50), primary_key=True), sa.Column("message", sa.Text(), nullable=False), sa.Column("timestamp", TIMESTAMP, nullable=False), sa.ForeignKeyConstraint( ("dag_id",), ["dag.dag_id"], name="dcw_dag_id_fkey", ondelete="CASCADE", ), )
Unapply Add DagWarning model
def downgrade(): """Unapply Add DagWarning model""" op.drop_table("dag_warning")
Apply compare types between ORM and DB.
def upgrade(): """Apply compare types between ORM and DB.""" conn = op.get_bind() with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "extra", existing_type=sa.TEXT(), type_=sa.Text(), existing_nullable=True, ) with op.batch_alter_table("log_template", schema=None) as batch_op: batch_op.alter_column( "created_at", existing_type=sa.DateTime(), type_=TIMESTAMP(), existing_nullable=False ) with op.batch_alter_table("serialized_dag", schema=None) as batch_op: # drop server_default batch_op.alter_column( "dag_hash", existing_type=sa.String(32), server_default=None, type_=sa.String(32), existing_nullable=False, ) with op.batch_alter_table("trigger", schema=None) as batch_op: batch_op.alter_column( "created_date", existing_type=sa.DateTime(), type_=TIMESTAMP(), existing_nullable=False ) if conn.dialect.name != "sqlite": return with op.batch_alter_table("serialized_dag", schema=None) as batch_op: batch_op.alter_column("fileloc_hash", existing_type=sa.Integer, type_=sa.BigInteger()) # Some sqlite date are not in db_types.TIMESTAMP. Convert these to TIMESTAMP. with op.batch_alter_table("dag", schema=None) as batch_op: batch_op.alter_column( "last_pickled", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "last_expired", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("dag_pickle", schema=None) as batch_op: batch_op.alter_column( "created_dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("dag_run", schema=None) as batch_op: batch_op.alter_column( "execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False ) batch_op.alter_column( "start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("import_error", schema=None) as batch_op: batch_op.alter_column( "timestamp", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("job", schema=None) as batch_op: batch_op.alter_column( "start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "latest_heartbeat", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("log", schema=None) as batch_op: batch_op.alter_column("dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True) batch_op.alter_column( "execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("serialized_dag", schema=None) as batch_op: batch_op.alter_column( "last_updated", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False ) with op.batch_alter_table("sla_miss", schema=None) as batch_op: batch_op.alter_column( "execution_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=False ) batch_op.alter_column( "timestamp", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) with op.batch_alter_table("task_fail", schema=None) as batch_op: batch_op.alter_column( "start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), 
existing_nullable=True ) with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column( "start_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True ) batch_op.alter_column( "queued_dttm", existing_type=sa.DATETIME(), type_=TIMESTAMP(), existing_nullable=True )
Unapply compare types between ORM and DB.
def downgrade(): """Unapply compare types between ORM and DB.""" with op.batch_alter_table("connection", schema=None) as batch_op: batch_op.alter_column( "extra", existing_type=sa.Text(), type_=sa.TEXT(), existing_nullable=True, ) with op.batch_alter_table("log_template", schema=None) as batch_op: batch_op.alter_column( "created_at", existing_type=TIMESTAMP(), type_=sa.DateTime(), existing_nullable=False ) with op.batch_alter_table("serialized_dag", schema=None) as batch_op: # add server_default batch_op.alter_column( "dag_hash", existing_type=sa.String(32), server_default="Hash not calculated yet", type_=sa.String(32), existing_nullable=False, ) with op.batch_alter_table("trigger", schema=None) as batch_op: batch_op.alter_column( "created_date", existing_type=TIMESTAMP(), type_=sa.DateTime(), existing_nullable=False ) conn = op.get_bind() if conn.dialect.name != "sqlite": return with op.batch_alter_table("serialized_dag", schema=None) as batch_op: batch_op.alter_column("fileloc_hash", existing_type=sa.BigInteger, type_=sa.Integer()) # Change these column back to sa.DATETIME() with op.batch_alter_table("task_instance", schema=None) as batch_op: batch_op.alter_column( "queued_dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) with op.batch_alter_table("task_fail", schema=None) as batch_op: batch_op.alter_column( "end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) with op.batch_alter_table("sla_miss", schema=None) as batch_op: batch_op.alter_column( "timestamp", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False ) with op.batch_alter_table("serialized_dag", schema=None) as batch_op: batch_op.alter_column( "last_updated", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False ) with op.batch_alter_table("log", schema=None) as batch_op: batch_op.alter_column( "execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column("dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True) with op.batch_alter_table("job", schema=None) as batch_op: batch_op.alter_column( "latest_heartbeat", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) with op.batch_alter_table("import_error", schema=None) as batch_op: batch_op.alter_column( "timestamp", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) with op.batch_alter_table("dag_run", schema=None) as batch_op: batch_op.alter_column( "end_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "start_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "execution_date", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=False ) with op.batch_alter_table("dag_pickle", schema=None) as batch_op: 
batch_op.alter_column( "created_dttm", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) with op.batch_alter_table("dag", schema=None) as batch_op: batch_op.alter_column( "last_expired", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True ) batch_op.alter_column( "last_pickled", existing_type=TIMESTAMP(), type_=sa.DATETIME(), existing_nullable=True )
Apply Add Dataset model
def upgrade(): """Apply Add Dataset model""" _create_dataset_table() _create_dag_schedule_dataset_reference_table() _create_task_outlet_dataset_reference_table() _create_dataset_dag_run_queue_table() _create_dataset_event_table() _create_dataset_event_dag_run_table()
Unapply Add Dataset model
def downgrade(): """Unapply Add Dataset model""" op.drop_table("dag_schedule_dataset_reference") op.drop_table("task_outlet_dataset_reference") op.drop_table("dataset_dag_run_queue") op.drop_table("dagrun_dataset_event") op.drop_table("dataset_event") op.drop_table("dataset")
Apply Remove smart sensors
def upgrade(): """Apply Remove smart sensors""" op.drop_table("sensor_instance") """Minimal model definition for migrations""" task_instance = table("task_instance", column("state", sa.String)) op.execute(task_instance.update().where(task_instance.c.state == "sensing").values({"state": "failed"}))
Unapply Remove smart sensors
def downgrade(): """Unapply Remove smart sensors""" op.create_table( "sensor_instance", sa.Column("id", sa.Integer(), nullable=False), sa.Column("task_id", StringID(), nullable=False), sa.Column("dag_id", StringID(), nullable=False), sa.Column("execution_date", TIMESTAMP, nullable=False), sa.Column("state", sa.String(length=20), nullable=True), sa.Column("try_number", sa.Integer(), nullable=True), sa.Column("start_date", TIMESTAMP, nullable=True), sa.Column("operator", sa.String(length=1000), nullable=False), sa.Column("op_classpath", sa.String(length=1000), nullable=False), sa.Column("hashcode", sa.BigInteger(), nullable=False), sa.Column("shardcode", sa.Integer(), nullable=False), sa.Column("poke_context", sa.Text(), nullable=False), sa.Column("execution_context", sa.Text(), nullable=True), sa.Column("created_at", TIMESTAMP, default=func.now, nullable=False), sa.Column("updated_at", TIMESTAMP, default=func.now, nullable=False), sa.PrimaryKeyConstraint("id"), ) op.create_index("ti_primary_key", "sensor_instance", ["dag_id", "task_id", "execution_date"], unique=True) op.create_index("si_hashcode", "sensor_instance", ["hashcode"], unique=False) op.create_index("si_shardcode", "sensor_instance", ["shardcode"], unique=False) op.create_index("si_state_shard", "sensor_instance", ["state", "shardcode"], unique=False) op.create_index("si_updated_at", "sensor_instance", ["updated_at"], unique=False)
Apply Add ``DagOwnerAttributes`` table
def upgrade(): """Apply Add ``DagOwnerAttributes`` table""" op.create_table( "dag_owner_attributes", sa.Column("dag_id", StringID(), nullable=False), sa.Column("owner", sa.String(length=500), nullable=False), sa.Column("link", sa.String(length=500), nullable=False), sa.ForeignKeyConstraint(["dag_id"], ["dag.dag_id"], ondelete="CASCADE"), sa.PrimaryKeyConstraint("dag_id", "owner"), )
Unapply Add ``DagOwnerAttributes`` table
def downgrade(): """Unapply Add Dataset model""" op.drop_table("dag_owner_attributes")
Apply add processor_subdir to DagModel and SerializedDagModel
def upgrade(): """Apply add processor_subdir to DagModel and SerializedDagModel""" conn = op.get_bind() with op.batch_alter_table("dag") as batch_op: if conn.dialect.name == "mysql": batch_op.add_column(sa.Column("processor_subdir", sa.Text(length=2000), nullable=True)) else: batch_op.add_column(sa.Column("processor_subdir", sa.String(length=2000), nullable=True)) with op.batch_alter_table("serialized_dag") as batch_op: if conn.dialect.name == "mysql": batch_op.add_column(sa.Column("processor_subdir", sa.Text(length=2000), nullable=True)) else: batch_op.add_column(sa.Column("processor_subdir", sa.String(length=2000), nullable=True)) with op.batch_alter_table("callback_request") as batch_op: batch_op.drop_column("dag_directory") if conn.dialect.name == "mysql": batch_op.add_column(sa.Column("processor_subdir", sa.Text(length=2000), nullable=True)) else: batch_op.add_column(sa.Column("processor_subdir", sa.String(length=2000), nullable=True))
Unapply Add processor_subdir to DagModel and SerializedDagModel
def downgrade(): """Unapply Add processor_subdir to DagModel and SerializedDagModel""" conn = op.get_bind() with op.batch_alter_table("dag", schema=None) as batch_op: batch_op.drop_column("processor_subdir") with op.batch_alter_table("serialized_dag", schema=None) as batch_op: batch_op.drop_column("processor_subdir") with op.batch_alter_table("callback_request") as batch_op: batch_op.drop_column("processor_subdir") if conn.dialect.name == "mysql": batch_op.add_column(sa.Column("dag_directory", sa.Text(length=1000), nullable=True)) else: batch_op.add_column(sa.Column("dag_directory", sa.String(length=1000), nullable=True))
Apply migration. If these columns are already of the right type (i.e. created by our migration in 1.10.13 rather than FAB itself in an earlier version), this migration will issue an alter statement to change them to what they already are -- i.e. it's a no-op. These tables are small (100 to low 1k rows at most), so it's not too costly to change them.
def upgrade(): """Apply migration. If these columns are already of the right type (i.e. created by our migration in 1.10.13 rather than FAB itself in an earlier version), this migration will issue an alter statement to change them to what they already are -- i.e. its a no-op. These tables are small (100 to low 1k rows at most), so it's not too costly to change them. """ conn = op.get_bind() if conn.dialect.name in ["mssql", "sqlite"]: # 1.10.12 didn't support SQL Server, so it couldn't have gotten this wrong --> nothing to correct # SQLite autoinc was "implicit" for an INTEGER NOT NULL PRIMARY KEY return for table in ( "ab_permission", "ab_view_menu", "ab_role", "ab_permission_view", "ab_permission_view_role", "ab_user", "ab_user_role", "ab_register_user", ): with op.batch_alter_table(table) as batch: kwargs = {} if conn.dialect.name == "postgresql": kwargs["server_default"] = sa.Sequence(f"{table}_id_seq").next_value() else: kwargs["autoincrement"] = True batch.alter_column("id", existing_type=sa.Integer(), existing_nullable=False, **kwargs)
Unapply add_missing_autoinc_fab
def downgrade(): """Unapply add_missing_autoinc_fab"""
Apply Add case-insensitive unique constraint
def upgrade(): """Apply Add case-insensitive unique constraint""" conn = op.get_bind() if conn.dialect.name == "postgresql": op.create_index("idx_ab_user_username", "ab_user", [sa.text("LOWER(username)")], unique=True) op.create_index( "idx_ab_register_user_username", "ab_register_user", [sa.text("LOWER(username)")], unique=True ) elif conn.dialect.name == "sqlite": with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column( "username", existing_type=sa.String(64), _type=sa.String(64, collation="NOCASE"), unique=True, nullable=False, ) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column( "username", existing_type=sa.String(64), _type=sa.String(64, collation="NOCASE"), unique=True, nullable=False, )
Unapply Add case-insensitive unique constraint
def downgrade(): """Unapply Add case-insensitive unique constraint""" conn = op.get_bind() if conn.dialect.name == "postgresql": op.drop_index("idx_ab_user_username", table_name="ab_user") op.drop_index("idx_ab_register_user_username", table_name="ab_register_user") elif conn.dialect.name == "sqlite": with op.batch_alter_table("ab_user") as batch_op: batch_op.alter_column( "username", existing_type=sa.String(64, collation="NOCASE"), _type=sa.String(64), unique=True, nullable=False, ) with op.batch_alter_table("ab_register_user") as batch_op: batch_op.alter_column( "username", existing_type=sa.String(64, collation="NOCASE"), _type=sa.String(64), unique=True, nullable=False, )
Apply add updated_at column to DagRun and TaskInstance
def upgrade(): """Apply add updated_at column to DagRun and TaskInstance""" with op.batch_alter_table("task_instance") as batch_op: batch_op.add_column(sa.Column("updated_at", TIMESTAMP, default=sa.func.now)) with op.batch_alter_table("dag_run") as batch_op: batch_op.add_column(sa.Column("updated_at", TIMESTAMP, default=sa.func.now))
Unapply add updated_at column to DagRun and TaskInstance
def downgrade(): """Unapply add updated_at column to DagRun and TaskInstance""" with op.batch_alter_table("task_instance") as batch_op: batch_op.drop_column("updated_at") with op.batch_alter_table("dag_run") as batch_op: batch_op.drop_column("updated_at")
Apply Add DagRunNote and TaskInstanceNote
def upgrade(): """Apply Add DagRunNote and TaskInstanceNote""" op.create_table( "dag_run_note", sa.Column("user_id", sa.Integer(), nullable=True), sa.Column("dag_run_id", sa.Integer(), nullable=False), sa.Column( "content", sa.String(length=1000).with_variant(sa.Text(length=1000), "mysql"), nullable=True ), sa.Column("created_at", UtcDateTime(timezone=True), nullable=False), sa.Column("updated_at", UtcDateTime(timezone=True), nullable=False), sa.ForeignKeyConstraint( ("dag_run_id",), ["dag_run.id"], name="dag_run_note_dr_fkey", ondelete="CASCADE" ), sa.ForeignKeyConstraint(("user_id",), ["ab_user.id"], name="dag_run_note_user_fkey"), sa.PrimaryKeyConstraint("dag_run_id", name=op.f("dag_run_note_pkey")), ) op.create_table( "task_instance_note", sa.Column("user_id", sa.Integer(), nullable=True), sa.Column("task_id", StringID(), nullable=False), sa.Column("dag_id", StringID(), nullable=False), sa.Column("run_id", StringID(), nullable=False), sa.Column("map_index", sa.Integer(), nullable=False), sa.Column( "content", sa.String(length=1000).with_variant(sa.Text(length=1000), "mysql"), nullable=True ), sa.Column("created_at", UtcDateTime(timezone=True), nullable=False), sa.Column("updated_at", UtcDateTime(timezone=True), nullable=False), sa.PrimaryKeyConstraint( "task_id", "dag_id", "run_id", "map_index", name=op.f("task_instance_note_pkey") ), sa.ForeignKeyConstraint( ("dag_id", "task_id", "run_id", "map_index"), [ "task_instance.dag_id", "task_instance.task_id", "task_instance.run_id", "task_instance.map_index", ], name="task_instance_note_ti_fkey", ondelete="CASCADE", ), sa.ForeignKeyConstraint(("user_id",), ["ab_user.id"], name="task_instance_note_user_fkey"), )
Unapply Add DagRunNote and TaskInstanceNote
def downgrade(): """Unapply Add DagRunNote and TaskInstanceNote""" op.drop_table("task_instance_note") op.drop_table("dag_run_note")