response | instruction |
---|---|
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash. | def make_safe_label_value(string: str) -> str:
"""
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label |
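A brief usage sketch, assuming `MAX_LABEL_LEN` is 63 and the `re2`/`md5` imports used above are in scope; the hash suffixes described in the comments are illustrative rather than computed values.
make_safe_label_value("my_dag")    # already valid: returned unchanged
make_safe_label_value("my dag!")   # invalid characters stripped, then "-" plus a 9-char md5 prefix appended
make_safe_label_value("x" * 100)   # truncated to 53 characters, then "-" plus a 9-char md5 prefix appended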
Transform a datetime object into a label-safe string.
Kubernetes doesn't like ":" in labels; since the ISO datetime format uses ":" but
not "_", replace ":" with "_".
:param datetime_obj: datetime.datetime object
:return: ISO-like string representing the datetime | def datetime_to_label_safe_datestring(datetime_obj: datetime.datetime) -> str:
"""
Transform a datetime object into a label-safe string.
Kubernetes doesn't like ":" in labels; since the ISO datetime format uses ":" but
not "_", replace ":" with "_".
:param datetime_obj: datetime.datetime object
:return: ISO-like string representing the datetime
"""
return datetime_obj.isoformat().replace(":", "_").replace("+", "_plus_") |
Transform a label back to a datetime object.
Kubernetes doesn't permit ":" in labels; the label stores "_" in place of ":",
so replace "_" back with ":" before parsing.
:param string: str
:return: datetime.datetime object | def label_safe_datestring_to_datetime(string: str) -> datetime.datetime:
"""
Transform a label back to a datetime object.
Kubernetes doesn't permit ":" in labels; the label stores "_" in place of ":",
so replace "_" back with ":" before parsing.
:param string: str
:return: datetime.datetime object
"""
return parser.parse(string.replace("_plus_", "+").replace("_", ":")) |
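A round-trip sketch of the two label helpers above (assuming `parser` is `dateutil.parser`, as the call to `parser.parse` implies):
import datetime
dt = datetime.datetime(2023, 5, 1, 12, 30, tzinfo=datetime.timezone.utc)
label = datetime_to_label_safe_datestring(dt)   # '2023-05-01T12_30_00_plus_00_00'
label_safe_datestring_to_datetime(label) == dt  # True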
Merge objects.
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects | def merge_objects(base_obj, client_obj):
"""
Merge objects.
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects
"""
if not base_obj:
return client_obj
if not client_obj:
return base_obj
client_obj_cp = copy.deepcopy(client_obj)
if isinstance(base_obj, dict) and isinstance(client_obj_cp, dict):
base_obj_cp = copy.deepcopy(base_obj)
base_obj_cp.update(client_obj_cp)
return base_obj_cp
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
if not isinstance(client_obj_cp, dict):
setattr(client_obj_cp, base_key, base_val)
else:
client_obj_cp[base_key] = base_val
return client_obj_cp |
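A minimal sketch with plain dicts (an assumption for illustration; in Airflow this helper is typically applied to Kubernetes API model objects as well). Note the merge is shallow at the top level:
base = {"limits": {"cpu": "1"}, "requests": {"memory": "1Gi"}}
client = {"limits": {"cpu": "2"}}
merge_objects(base, client)
# {'limits': {'cpu': '2'}, 'requests': {'memory': '1Gi'}}  -- client keys win, missing keys come from base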
Add field values to existing objects.
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:return: the client_obj with the property `field_name` being the two properties appended | def extend_object_field(base_obj, client_obj, field_name):
"""
Add field values to existing objects.
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:return: the client_obj with the property `field_name` being the two properties appended
"""
client_obj_cp = copy.deepcopy(client_obj)
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if (not isinstance(base_obj_field, list) and base_obj_field is not None) or (
not isinstance(client_obj_field, list) and client_obj_field is not None
):
raise ValueError("The chosen field must be a list.")
if not base_obj_field:
return client_obj_cp
if not client_obj_field:
setattr(client_obj_cp, field_name, base_obj_field)
return client_obj_cp
appended_fields = base_obj_field + client_obj_field
setattr(client_obj_cp, field_name, appended_fields)
return client_obj_cp |
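A minimal sketch using a plain object in place of a Kubernetes API model (an assumption; Airflow usually passes `kubernetes.client` objects here):
from types import SimpleNamespace

base = SimpleNamespace(volumes=["base-vol"])
client = SimpleNamespace(volumes=["client-vol"])
merged = extend_object_field(base, client, "volumes")
merged.volumes  # ['base-vol', 'client-vol'] -- base entries first, then client entries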
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash. | def make_safe_label_value(string):
"""
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label |
Get default value when None. | def default_if_none(arg: bool | None) -> bool:
"""Get default value when None."""
return arg or False |
Get the lineage backend if defined in the configs. | def get_backend() -> LineageBackend | None:
"""Get the lineage backend if defined in the configs."""
clazz = conf.getimport("lineage", "backend", fallback=None)
if clazz:
if not issubclass(clazz, LineageBackend):
raise TypeError(
f"Your custom Lineage class `{clazz.__name__}` "
f"is not a subclass of `{LineageBackend.__name__}`."
)
else:
return clazz()
return None |
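A minimal sketch of a custom backend that would satisfy the subclass check above; the class and module placement are hypothetical, and the class would be referenced from the `[lineage] backend` config option read by `conf.getimport`:
from airflow.lineage.backend import LineageBackend

class PrintingLineageBackend(LineageBackend):
    """Toy backend: just prints what it receives."""

    def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
        # In a real backend this would forward lineage metadata to an external system.
        print(f"{operator.task_id}: inlets={inlets} outlets={outlets}")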
Conditionally send lineage to the backend.
Saves the lineage to XCom and if configured to do so sends it
to the backend. | def apply_lineage(func: T) -> T:
"""
Conditionally send lineage to the backend.
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
_backend = get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Lineage called with inlets: %s, outlets: %s", self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = list(self.outlets)
inlets = list(self.inlets)
if outlets:
self.xcom_push(
context, key=PIPELINE_OUTLETS, value=outlets, execution_date=context["ti"].execution_date
)
if inlets:
self.xcom_push(
context, key=PIPELINE_INLETS, value=inlets, execution_date=context["ti"].execution_date
)
if _backend:
_backend.send_lineage(operator=self, inlets=self.inlets, outlets=self.outlets, context=context)
return ret_val
return cast(T, wrapper) |
Prepare the lineage inlets and outlets.
Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of data | def prepare_lineage(func: T) -> T:
"""
Prepare the lineage inlets and outlets.
Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of data
"""
@wraps(func)
def wrapper(self, context, *args, **kwargs):
from airflow.models.abstractoperator import AbstractOperator
self.log.debug("Preparing lineage inlets and outlets")
if isinstance(self.inlets, (str, AbstractOperator)):
self.inlets = [self.inlets]
if self.inlets and isinstance(self.inlets, list):
# get task_ids that are specified as parameter and make sure they are upstream
task_ids = {o for o in self.inlets if isinstance(o, str)}.union(
op.task_id for op in self.inlets if isinstance(op, AbstractOperator)
).intersection(self.get_flat_relative_ids(upstream=True))
# pick up unique direct upstream task_ids if AUTO is specified
if AUTO.upper() in self.inlets or AUTO.lower() in self.inlets:
task_ids = task_ids.union(task_ids.symmetric_difference(self.upstream_task_ids))
# Remove auto and task_ids
self.inlets = [i for i in self.inlets if not isinstance(i, str)]
# We manually create a session here since xcom_pull returns a LazyXComAccess iterator.
# If we do not pass a session a new session will be created, however that session will not be
# properly closed and will remain open. After we are done iterating we can safely close this
# session.
with create_session() as session:
_inlets = self.xcom_pull(
context, task_ids=task_ids, dag_id=self.dag_id, key=PIPELINE_OUTLETS, session=session
)
self.inlets.extend(i for it in _inlets for i in it)
elif self.inlets:
raise AttributeError("inlets is not a list, operator, string or attr annotated object")
if not isinstance(self.outlets, list):
self.outlets = [self.outlets]
# render inlets and outlets
self.inlets = [_render_object(i, context) for i in self.inlets]
self.outlets = [_render_object(i, context) for i in self.outlets]
self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)
return func(self, context, *args, **kwargs)
return cast(T, wrapper) |
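A hedged sketch of how this plays out in a DAG: with `inlets="auto"`, the downstream task picks up the upstream task's outlets at runtime. The DAG id, file path, and the use of `BashOperator`/`File` here are illustrative assumptions, not taken from the code above:
from datetime import datetime
from airflow import DAG
from airflow.lineage.entities import File
from airflow.operators.bash import BashOperator

with DAG(dag_id="lineage_example", start_date=datetime(2023, 1, 1), schedule=None):
    produce = BashOperator(
        task_id="produce",
        bash_command="echo produce",
        outlets=[File(url="/tmp/example.csv")],  # hypothetical outlet
    )
    consume = BashOperator(task_id="consume", bash_command="echo consume", inlets="auto")
    produce >> consume  # at runtime, consume's inlets include the /tmp/example.csv outlet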
Get singleton listener manager. | def get_listener_manager() -> ListenerManager:
"""Get singleton listener manager."""
global _listener_manager
if not _listener_manager:
_listener_manager = ListenerManager()
integrate_listener_plugins(_listener_manager)
return _listener_manager |
Execute when dag run state changes to RUNNING. | def on_dag_run_running(dag_run: DagRun, msg: str):
"""Execute when dag run state changes to RUNNING.""" |
Execute when dag run state changes to SUCCESS. | def on_dag_run_success(dag_run: DagRun, msg: str):
"""Execute when dag run state changes to SUCCESS.""" |
Execute when dag run state changes to FAIL. | def on_dag_run_failed(dag_run: DagRun, msg: str):
"""Execute when dag run state changes to FAIL.""" |
Execute when a new dataset is created. | def on_dataset_created(
dataset: Dataset,
):
"""Execute when a new dataset is created.""" |
Execute when dataset change is registered. | def on_dataset_changed(
dataset: Dataset,
):
"""Execute when dataset change is registered.""" |
Execute before an Airflow component (a job such as the scheduler, a worker, or the task runner) starts.
It's guaranteed this will be called before any other plugin method.
:param component: Component that calls this method | def on_starting(component):
"""
Execute before an Airflow component (a job such as the scheduler, a worker, or the task runner) starts.
It's guaranteed this will be called before any other plugin method.
:param component: Component that calls this method
""" |
Execute before an Airflow component (a job such as the scheduler, a worker, or the task runner) stops.
It's guaranteed this will be called after any other plugin method.
:param component: Component that calls this method | def before_stopping(component):
"""
Execute before an Airflow component (a job such as the scheduler, a worker, or the task runner) stops.
It's guaranteed this will be called after any other plugin method.
:param component: Component that calls this method
""" |
Execute when task state changes to RUNNING. previous_state can be None. | def on_task_instance_running(
previous_state: TaskInstanceState | None, task_instance: TaskInstance, session: Session | None
):
"""Execute when task state changes to RUNNING. previous_state can be None.""" |
Execute when task state changes to SUCCESS. previous_state can be None. | def on_task_instance_success(
previous_state: TaskInstanceState | None, task_instance: TaskInstance, session: Session | None
):
"""Execute when task state changes to SUCCESS. previous_state can be None.""" |
Execute when task state changes to FAIL. previous_state can be None. | def on_task_instance_failed(
previous_state: TaskInstanceState | None,
task_instance: TaskInstance,
error: None | str | BaseException,
session: Session | None,
):
"""Execute when task state changes to FAIL. previous_state can be None.""" |
Add or subtract days from a YYYY-MM-DD.
:param ds: anchor date in ``YYYY-MM-DD`` format to add to
:param days: number of days to add to the ds, you can use negative values
>>> ds_add("2015-01-01", 5)
'2015-01-06'
>>> ds_add("2015-01-06", -5)
'2015-01-01' | def ds_add(ds: str, days: int) -> str:
"""
Add or subtract days from a YYYY-MM-DD.
:param ds: anchor date in ``YYYY-MM-DD`` format to add to
:param days: number of days to add to the ds, you can use negative values
>>> ds_add("2015-01-01", 5)
'2015-01-06'
>>> ds_add("2015-01-06", -5)
'2015-01-01'
"""
if not days:
return str(ds)
dt = datetime.strptime(str(ds), "%Y-%m-%d") + timedelta(days=days)
return dt.strftime("%Y-%m-%d") |
Output datetime string in a given format.
:param ds: input string which contains a date
:param input_format: input string format. E.g. %Y-%m-%d
:param output_format: output string format E.g. %Y-%m-%d
>>> ds_format("2015-01-01", "%Y-%m-%d", "%m-%d-%y")
'01-01-15'
>>> ds_format("1/5/2015", "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05' | def ds_format(ds: str, input_format: str, output_format: str) -> str:
"""
Output datetime string in a given format.
:param ds: input string which contains a date
:param input_format: input string format. E.g. %Y-%m-%d
:param output_format: output string format E.g. %Y-%m-%d
>>> ds_format("2015-01-01", "%Y-%m-%d", "%m-%d-%y")
'01-01-15'
>>> ds_format("1/5/2015", "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05'
"""
return datetime.strptime(str(ds), input_format).strftime(output_format) |
Return a human-readable/approximate difference between datetimes.
When only one datetime is provided, the comparison will be based on now.
:param dt: The datetime to display the diff for
:param since: When to display the date from. If ``None`` then the diff is
between ``dt`` and now. | def datetime_diff_for_humans(dt: Any, since: DateTime | None = None) -> str:
"""
Return a human-readable/approximate difference between datetimes.
When only one datetime is provided, the comparison will be based on now.
:param dt: The datetime to display the diff for
:param since: When to display the date from. If ``None`` then the diff is
between ``dt`` and now.
"""
import pendulum
return pendulum.instance(dt).diff_for_humans(since) |
Get DataDog StatsD logger. | def get_dogstatsd_logger(cls) -> SafeDogStatsdLogger:
"""Get DataDog StatsD logger."""
from datadog import DogStatsd
metrics_validator: ListValidator
dogstatsd = DogStatsd(
host=conf.get("metrics", "statsd_host"),
port=conf.getint("metrics", "statsd_port"),
namespace=conf.get("metrics", "statsd_prefix"),
constant_tags=cls.get_constant_tags(),
)
if conf.get("metrics", "metrics_allow_list", fallback=None):
metrics_validator = AllowListValidator(conf.get("metrics", "metrics_allow_list"))
if conf.get("metrics", "metrics_block_list", fallback=None):
log.warning(
"Ignoring metrics_block_list as both metrics_allow_list "
"and metrics_block_list have been set"
)
elif conf.get("metrics", "metrics_block_list", fallback=None):
metrics_validator = BlockListValidator(conf.get("metrics", "metrics_block_list"))
else:
metrics_validator = AllowListValidator()
datadog_metrics_tags = conf.getboolean("metrics", "statsd_datadog_metrics_tags", fallback=True)
metric_tags_validator = BlockListValidator(conf.get("metrics", "statsd_disabled_tags", fallback=None))
return SafeDogStatsdLogger(dogstatsd, metrics_validator, datadog_metrics_tags, metric_tags_validator) |
Assembles the prefix, delimiter, and name and returns it as a string. | def full_name(name: str, *, prefix: str = DEFAULT_METRIC_NAME_PREFIX) -> str:
"""Assembles the prefix, delimiter, and name and returns it as a string."""
return f"{prefix}{DEFAULT_METRIC_NAME_DELIMITER}{name}" |
Return True if the provided name and prefix would result in a name that meets the OpenTelemetry standard.
Legal names are defined here:
https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax | def name_is_otel_safe(prefix: str, name: str) -> bool:
"""
Return True if the provided name and prefix would result in a name that meets the OpenTelemetry standard.
Legal names are defined here:
https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax
"""
return bool(stat_name_otel_handler(prefix, name, max_length=OTEL_NAME_MAX_LENGTH)) |
Given an OpenTelemetry Instrument, returns the type of the instrument as a string.
:param obj: An OTel Instrument or subclass
:returns: The type() of the Instrument without all the nested class info | def _type_as_str(obj: Instrument) -> str:
"""
Given an OpenTelemetry Instrument, returns the type of the instrument as a string.
:param obj: An OTel Instrument or subclass
:returns: The type() of the Instrument without all the nested class info
"""
# type().__name__ will return something like: '_Counter',
# this drops the leading underscore for cleaner logging.
return type(obj).__name__[1:] |
Verify that the provided name does not exceed OpenTelemetry's maximum length for metric names.
:param name: The original metric name
:returns: The name, truncated to an OTel-acceptable length if required. | def _get_otel_safe_name(name: str) -> str:
"""
Verify that the provided name does not exceed OpenTelemetry's maximum length for metric names.
:param name: The original metric name
:returns: The name, truncated to an OTel-acceptable length if required.
"""
otel_safe_name = name[:OTEL_NAME_MAX_LENGTH]
if name != otel_safe_name:
warnings.warn(
f"Metric name `{name}` exceeds OpenTelemetry's name length limit of "
f"{OTEL_NAME_MAX_LENGTH} characters and will be truncated to `{otel_safe_name}`.",
category=UserWarning,
stacklevel=2,
)
return otel_safe_name |
Add tags to stat with influxdb standard format if influxdb_tags_enabled is True. | def prepare_stat_with_tags(fn: T) -> T:
"""Add tags to stat with influxdb standard format if influxdb_tags_enabled is True."""
@wraps(fn)
def wrapper(
self, stat: str | None = None, *args, tags: dict[str, str] | None = None, **kwargs
) -> Callable[[str], str]:
if self.influxdb_tags_enabled:
if stat is not None and tags is not None:
for k, v in tags.items():
if self.metric_tags_validator.test(k):
if all(c not in [",", "="] for c in v + k):
stat += f",{k}={v}"
else:
log.error("Dropping invalid tag: %s=%s.", k, v)
return fn(self, stat, *args, tags=tags, **kwargs)
return cast(T, wrapper) |
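A standalone sketch of the tag-appending logic above, showing the InfluxDB-style stat name it produces (not a call into the Airflow API itself):
stat = "ti_successes"
tags = {"dag_id": "my_dag", "task_id": "extract"}
for k, v in tags.items():
    if all(c not in [",", "="] for c in v + k):  # tags containing "," or "=" are dropped
        stat += f",{k}={v}"
print(stat)  # ti_successes,dag_id=my_dag,task_id=extract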
Return logger for StatsD. | def get_statsd_logger(cls) -> SafeStatsdLogger:
"""Return logger for StatsD."""
# no need to check for the scheduler/statsd_on -> this method is only called when it is set
# and previously it would crash with None is callable if it was called without it.
from statsd import StatsClient
stats_class = conf.getimport("metrics", "statsd_custom_client_path", fallback=None)
if stats_class:
if not issubclass(stats_class, StatsClient):
raise AirflowConfigException(
"Your custom StatsD client must extend the statsd.StatsClient in order to ensure "
"backwards compatibility."
)
else:
log.info("Successfully loaded custom StatsD client")
else:
stats_class = StatsClient
statsd = stats_class(
host=conf.get("metrics", "statsd_host"),
port=conf.getint("metrics", "statsd_port"),
prefix=conf.get("metrics", "statsd_prefix"),
)
influxdb_tags_enabled = conf.getboolean("metrics", "statsd_influxdb_enabled", fallback=False)
metric_tags_validator = BlockListValidator(conf.get("metrics", "statsd_disabled_tags", fallback=None))
return SafeStatsdLogger(statsd, get_validator(), influxdb_tags_enabled, metric_tags_validator) |
Check if stat name contains invalid characters; logs and does not emit stats if name is invalid. | def validate_stat(fn: Callable) -> Callable:
"""Check if stat name contains invalid characters; logs and does not emit stats if name is invalid."""
@wraps(fn)
def wrapper(self, stat: str | None = None, *args, **kwargs) -> Callable | None:
try:
if stat is not None:
handler_stat_name_func = get_current_handler_stat_name_func()
stat = handler_stat_name_func(stat)
return fn(self, stat, *args, **kwargs)
except InvalidStatsNameException:
log.exception("Invalid stat name: %s.", stat)
return None
return cast(Callable, wrapper) |
Verify that a proposed prefix and name combination will meet OpenTelemetry naming standards.
See: https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax
:param stat_prefix: The proposed prefix applied to all metric names.
:param stat_name: The proposed name.
:param max_length: The max length of the combined prefix and name; defaults to the max length
as defined in the OpenTelemetry standard, but can be overridden.
:returns: Returns the approved combined name or raises an InvalidStatsNameException. | def stat_name_otel_handler(
stat_prefix: str,
stat_name: str,
max_length: int = OTEL_NAME_MAX_LENGTH,
) -> str:
"""
Verify that a proposed prefix and name combination will meet OpenTelemetry naming standards.
See: https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax
:param stat_prefix: The proposed prefix applied to all metric names.
:param stat_name: The proposed name.
:param max_length: The max length of the combined prefix and name; defaults to the max length
as defined in the OpenTelemetry standard, but can be overridden.
:returns: Returns the approved combined name or raises an InvalidStatsNameException.
"""
proposed_stat_name: str = f"{stat_prefix}.{stat_name}"
name_length_exemption: bool = False
matched_exemption: str = ""
# This test case is here to enforce that the values can not be None and
# must be a valid String. Without this test here, those values get cast
# to a string and pass when they should not, potentially resulting in
# metrics named "airflow.None", "airflow.42", or "None.42" for example.
if not (isinstance(stat_name, str) and isinstance(stat_prefix, str)):
raise InvalidStatsNameException("Stat name and prefix must both be strings.")
if len(proposed_stat_name) > OTEL_NAME_MAX_LENGTH:
# If the name is in the exceptions list, do not fail it for being too long.
# It may still be deemed invalid for other reasons below.
for exemption in BACK_COMPAT_METRIC_NAMES:
if re2.match(exemption, stat_name):
# There is a back-compat exception for this name; proceed
name_length_exemption = True
matched_exemption = exemption.pattern
break
else:
raise InvalidStatsNameException(
f"Invalid stat name: {proposed_stat_name}. Please see "
f"https://opentelemetry.io/docs/reference/specification/metrics/api/#instrument-name-syntax"
)
# `stat_name_default_handler` throws InvalidStatsNameException if the
# provided value is not valid or returns the value if it is. We don't
# need the return value but will make use of the validation checks. If
# no exception is thrown, then the proposed name meets OTel requirements.
stat_name_default_handler(proposed_stat_name, max_length=999 if name_length_exemption else max_length)
# This warning is down here instead of up above because the exemption only
# applies to the length and a name may still be invalid for other reasons.
if name_length_exemption:
warnings.warn(
f"Stat name {stat_name} matches exemption {matched_exemption} and "
f"will be truncated to {proposed_stat_name[:OTEL_NAME_MAX_LENGTH]}. "
f"This stat name will be deprecated in the future and replaced with "
f"a shorter name combined with Attributes/Tags.",
MetricNameLengthExemptionWarning,
stacklevel=2,
)
return proposed_stat_name |
Validate the metric stat name.
Apply changes when necessary and return the transformed stat name. | def stat_name_default_handler(
stat_name: str, max_length: int = 250, allowed_chars: Iterable[str] = ALLOWED_CHARACTERS
) -> str:
"""
Validate the metric stat name.
Apply changes when necessary and return the transformed stat name.
"""
if not isinstance(stat_name, str):
raise InvalidStatsNameException("The stat_name has to be a string")
if len(stat_name) > max_length:
raise InvalidStatsNameException(
f"The stat_name ({stat_name}) has to be less than {max_length} characters."
)
if any(c not in allowed_chars for c in stat_name):
raise InvalidStatsNameException(
f"The stat name ({stat_name}) has to be composed of ASCII "
f"alphabets, numbers, or the underscore, dot, or dash characters."
)
return stat_name |
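A brief sketch of the validation behaviour, assuming `ALLOWED_CHARACTERS` covers ASCII letters, digits, underscore, dot, and dash (as the error message above suggests):
stat_name_default_handler("scheduler.heartbeat")  # valid: returned unchanged
stat_name_default_handler("x" * 300)              # raises InvalidStatsNameException (longer than max_length)
stat_name_default_handler("bad name!")            # raises InvalidStatsNameException (disallowed characters)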
Get Stat Name Handler from airflow.cfg. | def get_current_handler_stat_name_func() -> Callable[[str], str]:
"""Get Stat Name Handler from airflow.cfg."""
handler = conf.getimport("metrics", "stat_name_handler")
if handler is None:
if conf.get("metrics", "statsd_influxdb_enabled", fallback=False):
handler = partial(stat_name_default_handler, allowed_chars={*ALLOWED_CHARACTERS, ",", "="})
else:
handler = stat_name_default_handler
return handler |
Filter objects for autogenerating revisions. | def include_object(_, name, type_, *args):
"""Filter objects for autogenerating revisions."""
# Ignore _anything_ to do with Celery, or FlaskSession's tables
if type_ == "table" and (name.startswith("celery_") or name == "session"):
return False
else:
return True |
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output. | def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=settings.SQL_ALCHEMY_CONN,
target_metadata=target_metadata,
literal_binds=True,
compare_type=compare_type,
compare_server_default=compare_server_default,
render_as_batch=True,
)
with context.begin_transaction():
context.run_migrations() |
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
with contextlib.ExitStack() as stack:
connection = config.attributes.get("connection", None)
if not connection:
connection = stack.push(settings.engine.connect())
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=compare_type,
compare_server_default=compare_server_default,
include_object=include_object,
render_as_batch=True,
)
with context.begin_transaction():
context.run_migrations() |
Return the primary and unique constraint along with column name.
Some tables like `task_instance` are missing the primary key constraint
name and the name is auto-generated by the SQL server, so this function
helps to retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary mapping constraint type to {constraint name: [column names]} for the table | def get_mssql_table_constraints(conn, table_name) -> dict[str, dict[str, list[str]]]:
"""
Return the primary and unique constraint along with column name.
Some tables like `task_instance` are missing the primary key constraint
name and the name is auto-generated by the SQL server, so this function
helps to retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary mapping constraint type to {constraint name: [column names]} for the table
"""
query = text(
f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
WHERE tc.TABLE_NAME = '{table_name}' AND
(tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE'
or UPPER(tc.CONSTRAINT_TYPE) = 'FOREIGN KEY')
"""
)
result = conn.execute(query).fetchall()
constraint_dict = defaultdict(lambda: defaultdict(list))
for constraint, constraint_type, col_name in result:
constraint_dict[constraint_type][constraint].append(col_name)
return constraint_dict |
Create Index. | def upgrade():
"""Create Index."""
op.create_index("idx_xcom_dag_task_date", "xcom", ["dag_id", "task_id", "execution_date"], unique=False) |
Drop Index. | def downgrade():
"""Drop Index."""
op.drop_index("idx_xcom_dag_task_date", table_name="xcom") |
Add pid column to task_instance table. | def upgrade():
"""Add pid column to task_instance table."""
op.add_column("task_instance", sa.Column("pid", sa.Integer)) |
Drop pid column from task_instance table. | def downgrade():
"""Drop pid column from task_instance table."""
op.drop_column("task_instance", "pid") |
Fix broken foreign-key constraint for existing SQLite DBs. | def upgrade():
"""Fix broken foreign-key constraint for existing SQLite DBs."""
conn = op.get_bind()
if conn.dialect.name == "sqlite":
# Fix broken foreign-key constraint for existing SQLite DBs.
#
# Re-define tables and use copy_from to avoid reflection
# which would fail because referenced user table doesn't exist.
#
# Use batch_alter_table to support SQLite workaround.
chart_table = sa.Table(
"chart",
sa.MetaData(),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("label", sa.String(length=200), nullable=True),
sa.Column("conn_id", sa.String(length=250), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("chart_type", sa.String(length=100), nullable=True),
sa.Column("sql_layout", sa.String(length=50), nullable=True),
sa.Column("sql", sa.Text(), nullable=True),
sa.Column("y_log_scale", sa.Boolean(), nullable=True),
sa.Column("show_datatable", sa.Boolean(), nullable=True),
sa.Column("show_sql", sa.Boolean(), nullable=True),
sa.Column("height", sa.Integer(), nullable=True),
sa.Column("default_params", sa.String(length=5000), nullable=True),
sa.Column("x_is_date", sa.Boolean(), nullable=True),
sa.Column("iteration_no", sa.Integer(), nullable=True),
sa.Column("last_modified", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
with op.batch_alter_table("chart", copy_from=chart_table) as batch_op:
batch_op.create_foreign_key("chart_user_id_fkey", "users", ["user_id"], ["id"]) |
Make TaskInstance.pool field not nullable. | def upgrade():
"""Make TaskInstance.pool field not nullable."""
with create_session() as session:
session.query(TaskInstance).filter(TaskInstance.pool.is_(None)).update(
{TaskInstance.pool: "default_pool"}, synchronize_session=False
) # Avoid select updated rows
session.commit()
conn = op.get_bind()
if conn.dialect.name == "mssql":
op.drop_index(index_name="ti_pool", table_name="task_instance")
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column(
column_name="pool",
type_=sa.String(50),
nullable=False,
)
if conn.dialect.name == "mssql":
op.create_index(
index_name="ti_pool", table_name="task_instance", columns=["pool", "state", "priority_weight"]
) |
Make TaskInstance.pool field nullable. | def downgrade():
"""Make TaskInstance.pool field nullable."""
conn = op.get_bind()
if conn.dialect.name == "mssql":
op.drop_index(index_name="ti_pool", table_name="task_instance")
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column(
column_name="pool",
type_=sa.String(50),
nullable=True,
)
if conn.dialect.name == "mssql":
op.create_index(
index_name="ti_pool", table_name="task_instance", columns=["pool", "state", "priority_weight"]
)
with create_session() as session:
session.query(TaskInstance).filter(TaskInstance.pool == "default_pool").update(
{TaskInstance.pool: None}, synchronize_session=False
) # Avoid select updated rows
session.commit() |
Upgrade version. | def upgrade():
"""Upgrade version."""
json_type = sa.JSON
conn = op.get_bind()
if conn.dialect.name != "postgresql":
# Mysql 5.7+/MariaDB 10.2.3 has JSON support. Rather than checking for
# versions, check for the function existing.
try:
conn.execute(text("SELECT JSON_VALID(1)")).fetchone()
except (sa.exc.OperationalError, sa.exc.ProgrammingError):
json_type = sa.Text
op.create_table(
"serialized_dag",
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("fileloc", sa.String(length=2000), nullable=False),
sa.Column("fileloc_hash", sa.Integer(), nullable=False),
sa.Column("data", json_type(), nullable=False),
sa.Column("last_updated", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("dag_id"),
)
op.create_index("idx_fileloc_hash", "serialized_dag", ["fileloc_hash"])
if conn.dialect.name == "mysql":
conn.execute(text("SET time_zone = '+00:00'"))
cur = conn.execute(text("SELECT @@explicit_defaults_for_timestamp"))
res = cur.fetchall()
if res[0][0] == 0:
raise Exception("Global variable explicit_defaults_for_timestamp needs to be on (1) for mysql")
op.alter_column(
table_name="serialized_dag",
column_name="last_updated",
type_=mysql.TIMESTAMP(fsp=6),
nullable=False,
)
else:
# sqlite and mssql datetime are fine as is. Therefore, not converting
if conn.dialect.name in ("sqlite", "mssql"):
return
# we try to be database agnostic, but not every db (e.g. sqlserver)
# supports per session time zones
if conn.dialect.name == "postgresql":
conn.execute(text("set timezone=UTC"))
op.alter_column(
table_name="serialized_dag",
column_name="last_updated",
type_=sa.TIMESTAMP(timezone=True),
) |
Downgrade version. | def downgrade():
"""Downgrade version."""
op.drop_table("serialized_dag") |
Apply Add ``root_dag_id`` to ``DAG`` | def upgrade():
"""Apply Add ``root_dag_id`` to ``DAG``"""
op.add_column("dag", sa.Column("root_dag_id", StringID(), nullable=True))
op.create_index("idx_root_dag_id", "dag", ["root_dag_id"], unique=False) |
Unapply Add ``root_dag_id`` to ``DAG`` | def downgrade():
"""Unapply Add ``root_dag_id`` to ``DAG``"""
op.drop_index("idx_root_dag_id", table_name="dag")
op.drop_column("dag", "root_dag_id") |
Change datetime to datetime2(6) when using MSSQL as backend. | def upgrade():
"""Change datetime to datetime2(6) when using MSSQL as backend."""
conn = op.get_bind()
if conn.dialect.name == "mssql":
result = conn.execute(
text(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
)
).fetchone()
mssql_version = result[0]
if mssql_version in ("2000", "2005"):
return
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.drop_index("idx_task_reschedule_dag_task_date")
task_reschedule_batch_op.drop_constraint("task_reschedule_dag_task_date_fkey", type_="foreignkey")
task_reschedule_batch_op.alter_column(
column_name="execution_date",
type_=mssql.DATETIME2(precision=6),
nullable=False,
)
task_reschedule_batch_op.alter_column(
column_name="start_date", type_=mssql.DATETIME2(precision=6)
)
task_reschedule_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
task_reschedule_batch_op.alter_column(
column_name="reschedule_date", type_=mssql.DATETIME2(precision=6)
)
with op.batch_alter_table("task_instance") as task_instance_batch_op:
task_instance_batch_op.drop_index("ti_state_lkp")
task_instance_batch_op.drop_index("ti_dag_date")
modify_execution_date_with_constraint(
conn, task_instance_batch_op, "task_instance", mssql.DATETIME2(precision=6), False
)
task_instance_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.alter_column(column_name="queued_dttm", type_=mssql.DATETIME2(precision=6))
task_instance_batch_op.create_index(
"ti_state_lkp", ["dag_id", "task_id", "execution_date"], unique=False
)
task_instance_batch_op.create_index("ti_dag_date", ["dag_id", "execution_date"], unique=False)
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.create_foreign_key(
"task_reschedule_dag_task_date_fkey",
"task_instance",
["task_id", "dag_id", "execution_date"],
["task_id", "dag_id", "execution_date"],
ondelete="CASCADE",
)
task_reschedule_batch_op.create_index(
"idx_task_reschedule_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False
)
with op.batch_alter_table("dag_run") as dag_run_batch_op:
modify_execution_date_with_constraint(
conn, dag_run_batch_op, "dag_run", mssql.DATETIME2(precision=6), None
)
dag_run_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME2(precision=6))
dag_run_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="log", column_name="execution_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="log", column_name="dttm", type_=mssql.DATETIME2(precision=6))
with op.batch_alter_table("sla_miss") as sla_miss_batch_op:
modify_execution_date_with_constraint(
conn, sla_miss_batch_op, "sla_miss", mssql.DATETIME2(precision=6), False
)
sla_miss_batch_op.alter_column(column_name="timestamp", type_=mssql.DATETIME2(precision=6))
op.drop_index("idx_task_fail_dag_task_date", table_name="task_fail")
op.alter_column(
table_name="task_fail", column_name="execution_date", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(table_name="task_fail", column_name="start_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="task_fail", column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.create_index(
"idx_task_fail_dag_task_date", "task_fail", ["dag_id", "task_id", "execution_date"], unique=False
)
op.drop_index("idx_xcom_dag_task_date", table_name="xcom")
op.alter_column(table_name="xcom", column_name="execution_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="xcom", column_name="timestamp", type_=mssql.DATETIME2(precision=6))
op.create_index(
"idx_xcom_dag_task_date", "xcom", ["dag_id", "task_id", "execution_date"], unique=False
)
op.alter_column(
table_name="dag", column_name="last_scheduler_run", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="dag", column_name="last_expired", type_=mssql.DATETIME2(precision=6))
op.alter_column(
table_name="dag_pickle", column_name="created_dttm", type_=mssql.DATETIME2(precision=6)
)
op.alter_column(
table_name="import_error", column_name="timestamp", type_=mssql.DATETIME2(precision=6)
)
op.drop_index("job_type_heart", table_name="job")
op.drop_index("idx_job_state_heartbeat", table_name="job")
op.alter_column(table_name="job", column_name="start_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="job", column_name="end_date", type_=mssql.DATETIME2(precision=6))
op.alter_column(table_name="job", column_name="latest_heartbeat", type_=mssql.DATETIME2(precision=6))
op.create_index("idx_job_state_heartbeat", "job", ["state", "latest_heartbeat"], unique=False)
op.create_index("job_type_heart", "job", ["job_type", "latest_heartbeat"], unique=False) |
Change datetime2(6) back to datetime. | def downgrade():
"""Change datetime2(6) back to datetime."""
conn = op.get_bind()
if conn.dialect.name == "mssql":
result = conn.execute(
text(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
)
).fetchone()
mssql_version = result[0]
if mssql_version in ("2000", "2005"):
return
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.drop_index("idx_task_reschedule_dag_task_date")
task_reschedule_batch_op.drop_constraint("task_reschedule_dag_task_date_fkey", type_="foreignkey")
task_reschedule_batch_op.alter_column(
column_name="execution_date", type_=mssql.DATETIME, nullable=False
)
task_reschedule_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
task_reschedule_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
task_reschedule_batch_op.alter_column(column_name="reschedule_date", type_=mssql.DATETIME)
with op.batch_alter_table("task_instance") as task_instance_batch_op:
task_instance_batch_op.drop_index("ti_state_lkp")
task_instance_batch_op.drop_index("ti_dag_date")
modify_execution_date_with_constraint(
conn, task_instance_batch_op, "task_instance", mssql.DATETIME, False
)
task_instance_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
task_instance_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
task_instance_batch_op.alter_column(column_name="queued_dttm", type_=mssql.DATETIME)
task_instance_batch_op.create_index(
"ti_state_lkp", ["dag_id", "task_id", "execution_date"], unique=False
)
task_instance_batch_op.create_index("ti_dag_date", ["dag_id", "execution_date"], unique=False)
with op.batch_alter_table("task_reschedule") as task_reschedule_batch_op:
task_reschedule_batch_op.create_foreign_key(
"task_reschedule_dag_task_date_fkey",
"task_instance",
["task_id", "dag_id", "execution_date"],
["task_id", "dag_id", "execution_date"],
ondelete="CASCADE",
)
task_reschedule_batch_op.create_index(
"idx_task_reschedule_dag_task_date", ["dag_id", "task_id", "execution_date"], unique=False
)
with op.batch_alter_table("dag_run") as dag_run_batch_op:
modify_execution_date_with_constraint(conn, dag_run_batch_op, "dag_run", mssql.DATETIME, None)
dag_run_batch_op.alter_column(column_name="start_date", type_=mssql.DATETIME)
dag_run_batch_op.alter_column(column_name="end_date", type_=mssql.DATETIME)
op.alter_column(table_name="log", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="log", column_name="dttm", type_=mssql.DATETIME)
with op.batch_alter_table("sla_miss") as sla_miss_batch_op:
modify_execution_date_with_constraint(conn, sla_miss_batch_op, "sla_miss", mssql.DATETIME, False)
sla_miss_batch_op.alter_column(column_name="timestamp", type_=mssql.DATETIME)
op.drop_index("idx_task_fail_dag_task_date", table_name="task_fail")
op.alter_column(table_name="task_fail", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="task_fail", column_name="start_date", type_=mssql.DATETIME)
op.alter_column(table_name="task_fail", column_name="end_date", type_=mssql.DATETIME)
op.create_index(
"idx_task_fail_dag_task_date", "task_fail", ["dag_id", "task_id", "execution_date"], unique=False
)
op.drop_index("idx_xcom_dag_task_date", table_name="xcom")
op.alter_column(table_name="xcom", column_name="execution_date", type_=mssql.DATETIME)
op.alter_column(table_name="xcom", column_name="timestamp", type_=mssql.DATETIME)
op.create_index(
"idx_xcom_dag_task_date", "xcom", ["dag_id", "task_ild", "execution_date"], unique=False
)
op.alter_column(table_name="dag", column_name="last_scheduler_run", type_=mssql.DATETIME)
op.alter_column(table_name="dag", column_name="last_pickled", type_=mssql.DATETIME)
op.alter_column(table_name="dag", column_name="last_expired", type_=mssql.DATETIME)
op.alter_column(table_name="dag_pickle", column_name="created_dttm", type_=mssql.DATETIME)
op.alter_column(table_name="import_error", column_name="timestamp", type_=mssql.DATETIME)
op.drop_index("job_type_heart", table_name="job")
op.drop_index("idx_job_state_heartbeat", table_name="job")
op.alter_column(table_name="job", column_name="start_date", type_=mssql.DATETIME)
op.alter_column(table_name="job", column_name="end_date", type_=mssql.DATETIME)
op.alter_column(table_name="job", column_name="latest_heartbeat", type_=mssql.DATETIME)
op.create_index("idx_job_state_heartbeat", "job", ["state", "latest_heartbeat"], unique=False)
op.create_index("job_type_heart", "job", ["job_type", "latest_heartbeat"], unique=False) |
Return the primary and unique constraints along with their column names.
Some tables, such as task_instance, are missing a primary key constraint name,
and the name is auto-generated by the SQL server, so this function helps to
retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary of ((constraint name, constraint type), column name) of table | def get_table_constraints(conn, table_name) -> dict[tuple[str, str], list[str]]:
"""Return primary and unique constraint along with column name.
This function return primary and unique constraint
along with column name. some tables like task_instance
is missing primary key constraint name and the name is
auto-generated by sql server. so this function helps to
retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary of ((constraint name, constraint type), column name) of table
"""
query = text(
f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
WHERE tc.TABLE_NAME = '{table_name}' AND
(tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE')
"""
)
result = conn.execute(query).fetchall()
constraint_dict = defaultdict(list)
for constraint, constraint_type, column in result:
constraint_dict[(constraint, constraint_type)].append(column)
return constraint_dict |
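For reference, a hypothetical shape of the returned mapping (the primary key constraint name is auto-generated by SQL Server and will differ per database); the `(name, type)` tuple keys are what `drop_constraint` and `create_constraint` below index into:
# Hypothetical return value for the task_instance table:
{
    ("PK__task_ins__1234ABCD", "PRIMARY KEY"): ["task_id", "dag_id", "execution_date"],
    ("uq_example_name", "UNIQUE"): ["some_column"],
}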
Reorder the columns for creating constraint.
Preserve primary key ordering
``['task_id', 'dag_id', 'execution_date']``
:param columns: columns retrieved from DB related to constraint
:return: ordered column | def reorder_columns(columns):
"""Reorder the columns for creating constraint.
Preserve primary key ordering
``['task_id', 'dag_id', 'execution_date']``
:param columns: columns retrieved from DB related to constraint
:return: ordered column
"""
ordered_columns = []
for column in ["task_id", "dag_id", "execution_date"]:
if column in columns:
ordered_columns.append(column)
for column in columns:
if column not in ["task_id", "dag_id", "execution_date"]:
ordered_columns.append(column)
return ordered_columns |
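For example (the extra column name is hypothetical):
reorder_columns(["execution_date", "extra_col", "task_id"])
# -> ['task_id', 'execution_date', 'extra_col']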
Drop a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table | def drop_constraint(operator, constraint_dict):
"""Drop a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if "execution_date" in columns:
if constraint[1].lower().startswith("primary"):
operator.drop_constraint(constraint[0], type_="primary")
elif constraint[1].lower().startswith("unique"):
operator.drop_constraint(constraint[0], type_="unique") |
Create a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table | def create_constraint(operator, constraint_dict):
"""Create a primary key or unique constraint.
:param operator: batch_alter_table for the table
:param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if "execution_date" in columns:
if constraint[1].lower().startswith("primary"):
operator.create_primary_key(constraint_name=constraint[0], columns=reorder_columns(columns))
elif constraint[1].lower().startswith("unique"):
operator.create_unique_constraint(
constraint_name=constraint[0], columns=reorder_columns(columns)
) |
Change type of column execution_date.
Helper function changes type of column execution_date by
dropping and recreating any primary/unique constraint associated with
the column
:param conn: sql connection object
:param batch_operator: batch_alter_table for the table
:param table_name: table name
:param type_: DB column type
:param nullable: nullable (boolean) | def modify_execution_date_with_constraint(conn, batch_operator, table_name, type_, nullable) -> None:
"""Change type of column execution_date.
Helper function changes type of column execution_date by
dropping and recreating any primary/unique constraint associated with
the column
:param conn: sql connection object
:param batch_operator: batch_alter_table for the table
:param table_name: table name
:param type_: DB column type
:param nullable: nullable (boolean)
"""
constraint_dict = get_table_constraints(conn, table_name)
drop_constraint(batch_operator, constraint_dict)
batch_operator.alter_column(
column_name="execution_date",
type_=type_,
nullable=nullable,
)
create_constraint(batch_operator, constraint_dict) |
Increase the queue column size from 50 to 256 characters to accommodate broker backends that might use unusually long queue names. | def upgrade():
"""
Increase the queue column size from 50 to 256 characters to accommodate broker backends that might use unusually long queue names.
"""
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column("queue", type_=sa.String(256)) |
Revert column size from 256 to 50 characters, might result in data loss. | def downgrade():
"""Revert column size from 256 to 50 characters, might result in data loss."""
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("task_instance") as batch_op:
batch_op.alter_column("queue", type_=sa.String(50)) |
Drop dag_stats table | def upgrade():
"""Drop dag_stats table"""
op.drop_table("dag_stats") |
Create dag_stats table | def downgrade():
"""Create dag_stats table"""
op.create_table(
"dag_stats",
sa.Column("dag_id", sa.String(length=250), nullable=False),
sa.Column("state", sa.String(length=50), nullable=False),
sa.Column("count", sa.Integer(), nullable=False, default=0),
sa.Column("dirty", sa.Boolean(), nullable=False, default=False),
sa.PrimaryKeyConstraint("dag_id", "state"),
) |
Apply Increase length for connection password | def upgrade():
"""Apply Increase length for connection password"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"password",
existing_type=sa.VARCHAR(length=500),
type_=sa.String(length=5000),
existing_nullable=True,
) |
Unapply Increase length for connection password | def downgrade():
"""Unapply Increase length for connection password"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column(
"password",
existing_type=sa.String(length=5000),
type_=sa.VARCHAR(length=500),
existing_nullable=True,
) |
Apply Add ``DagTags`` table | def upgrade():
"""Apply Add ``DagTags`` table"""
op.create_table(
"dag_tag",
sa.Column("name", sa.String(length=100), nullable=False),
sa.Column("dag_id", StringID(), nullable=False),
sa.ForeignKeyConstraint(
["dag_id"],
["dag.dag_id"],
),
sa.PrimaryKeyConstraint("name", "dag_id"),
) |
Unapply Add ``DagTags`` table | def downgrade():
"""Unapply Add ``DagTags`` table"""
op.drop_table("dag_tag") |
Apply Add ``RenderedTaskInstanceFields`` table | def upgrade():
"""Apply Add ``RenderedTaskInstanceFields`` table"""
json_type = sa.JSON
conn = op.get_bind()
if conn.dialect.name != "postgresql":
# Mysql 5.7+/MariaDB 10.2.3 has JSON support. Rather than checking for
# versions, check for the function existing.
try:
conn.execute(text("SELECT JSON_VALID(1)")).fetchone()
except (sa.exc.OperationalError, sa.exc.ProgrammingError):
json_type = sa.Text
op.create_table(
TABLE_NAME,
sa.Column("dag_id", StringID(), nullable=False),
sa.Column("task_id", StringID(), nullable=False),
sa.Column("execution_date", sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column("rendered_fields", json_type(), nullable=False),
sa.PrimaryKeyConstraint("dag_id", "task_id", "execution_date"),
) |
Drop RenderedTaskInstanceFields table | def downgrade():
"""Drop RenderedTaskInstanceFields table"""
op.drop_table(TABLE_NAME) |
Create DagCode Table. | def upgrade():
"""Create DagCode Table."""
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class SerializedDagModel(Base):
__tablename__ = "serialized_dag"
# There are other columns here, but these are the only ones we need for the SELECT/UPDATE we are doing
dag_id = sa.Column(sa.String(250), primary_key=True)
fileloc = sa.Column(sa.String(2000), nullable=False)
fileloc_hash = sa.Column(sa.BigInteger, nullable=False)
"""Apply add source code table"""
op.create_table(
"dag_code",
sa.Column("fileloc_hash", sa.BigInteger(), nullable=False, primary_key=True, autoincrement=False),
sa.Column("fileloc", sa.String(length=2000), nullable=False),
sa.Column("source_code", sa.UnicodeText(), nullable=False),
sa.Column("last_updated", sa.TIMESTAMP(timezone=True), nullable=False),
)
conn = op.get_bind()
if conn.dialect.name != "sqlite":
if conn.dialect.name == "mssql":
op.drop_index(index_name="idx_fileloc_hash", table_name="serialized_dag")
op.alter_column(
table_name="serialized_dag", column_name="fileloc_hash", type_=sa.BigInteger(), nullable=False
)
if conn.dialect.name == "mssql":
op.create_index(
index_name="idx_fileloc_hash", table_name="serialized_dag", columns=["fileloc_hash"]
)
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=conn)
serialized_dags = session.query(SerializedDagModel).all()
for dag in serialized_dags:
dag.fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc)
session.merge(dag)
session.commit() |
Unapply add source code table | def downgrade():
"""Unapply add source code table"""
op.drop_table("dag_code") |
Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table for MySQL | def upgrade():
"""Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table for MySQL"""
conn = op.get_bind()
if conn.dialect.name == "mysql":
op.alter_column(
table_name=TABLE_NAME, column_name=COLUMN_NAME, type_=mysql.TIMESTAMP(fsp=6), nullable=False
) |
Unapply Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table | def downgrade():
"""Unapply Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table"""
conn = op.get_bind()
if conn.dialect.name == "mysql":
op.alter_column(
table_name=TABLE_NAME, column_name=COLUMN_NAME, type_=mysql.TIMESTAMP(), nullable=False
) |
Apply Add ``dag_hash`` Column to ``serialized_dag`` table | def upgrade():
"""Apply Add ``dag_hash`` Column to ``serialized_dag`` table"""
op.add_column(
"serialized_dag",
sa.Column("dag_hash", sa.String(32), nullable=False, server_default="Hash not calculated yet"),
) |
Unapply Add ``dag_hash`` Column to ``serialized_dag`` table | def downgrade():
"""Unapply Add ``dag_hash`` Column to ``serialized_dag`` table"""
op.drop_column("serialized_dag", "dag_hash") |
Create FAB Tables | def upgrade():
"""Create FAB Tables"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "ab_permission" not in tables:
op.create_table(
"ab_permission",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("name", sa.String(length=100), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
if "ab_view_menu" not in tables:
op.create_table(
"ab_view_menu",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("name", sa.String(length=100), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
if "ab_role" not in tables:
op.create_table(
"ab_role",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("name", sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
if "ab_permission_view" not in tables:
op.create_table(
"ab_permission_view",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("permission_id", sa.Integer(), nullable=True),
sa.Column("view_menu_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["permission_id"], ["ab_permission.id"]),
sa.ForeignKeyConstraint(["view_menu_id"], ["ab_view_menu.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("permission_id", "view_menu_id"),
)
if "ab_permission_view_role" not in tables:
op.create_table(
"ab_permission_view_role",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("permission_view_id", sa.Integer(), nullable=True),
sa.Column("role_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["permission_view_id"], ["ab_permission_view.id"]),
sa.ForeignKeyConstraint(["role_id"], ["ab_role.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("permission_view_id", "role_id"),
)
if "ab_user" not in tables:
op.create_table(
"ab_user",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("first_name", sa.String(length=64), nullable=False),
sa.Column("last_name", sa.String(length=64), nullable=False),
sa.Column("username", sa.String(length=64), nullable=False),
sa.Column("password", sa.String(length=256), nullable=True),
sa.Column("active", sa.Boolean(), nullable=True),
sa.Column("email", sa.String(length=64), nullable=False),
sa.Column("last_login", sa.DateTime(), nullable=True),
sa.Column("login_count", sa.Integer(), nullable=True),
sa.Column("fail_login_count", sa.Integer(), nullable=True),
sa.Column("created_on", sa.DateTime(), nullable=True),
sa.Column("changed_on", sa.DateTime(), nullable=True),
sa.Column("created_by_fk", sa.Integer(), nullable=True),
sa.Column("changed_by_fk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["changed_by_fk"], ["ab_user.id"]),
sa.ForeignKeyConstraint(["created_by_fk"], ["ab_user.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("email"),
sa.UniqueConstraint("username"),
)
if "ab_user_role" not in tables:
op.create_table(
"ab_user_role",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("role_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["role_id"],
["ab_role.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["ab_user.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("user_id", "role_id"),
)
if "ab_register_user" not in tables:
op.create_table(
"ab_register_user",
sa.Column("id", sa.Integer(), nullable=False, primary_key=True),
sa.Column("first_name", sa.String(length=64), nullable=False),
sa.Column("last_name", sa.String(length=64), nullable=False),
sa.Column("username", sa.String(length=64), nullable=False),
sa.Column("password", sa.String(length=256), nullable=True),
sa.Column("email", sa.String(length=64), nullable=False),
sa.Column("registration_date", sa.DateTime(), nullable=True),
sa.Column("registration_hash", sa.String(length=256), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
) |
Drop FAB Tables | def downgrade():
"""Drop FAB Tables"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
fab_tables = [
"ab_permission",
"ab_view_menu",
"ab_role",
"ab_permission_view",
"ab_permission_view_role",
"ab_user",
"ab_user_role",
"ab_register_user",
]
for table in fab_tables:
if table in tables:
indexes = inspector.get_foreign_keys(table)
for index in indexes:
if conn.dialect.name != "sqlite":
op.drop_constraint(index.get("name"), table, type_="foreignkey")
for table in fab_tables:
if table in tables:
if conn.dialect.name == "sqlite":
op.execute("PRAGMA foreign_keys=off")
op.drop_table(table)
op.execute("PRAGMA foreign_keys=on")
else:
op.drop_table(table) |
Apply Increase length of ``Flask-AppBuilder`` ``ab_view_menu.name`` column | def upgrade():
"""Apply Increase length of ``Flask-AppBuilder`` ``ab_view_menu.name`` column"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "ab_view_menu" in tables:
if conn.dialect.name == "sqlite":
op.execute("PRAGMA foreign_keys=off")
op.execute(
"""
CREATE TABLE IF NOT EXISTS ab_view_menu_dg_tmp
(
id INTEGER NOT NULL PRIMARY KEY,
name VARCHAR(250) NOT NULL UNIQUE
);
"""
)
op.execute("INSERT INTO ab_view_menu_dg_tmp(id, name) select id, name from ab_view_menu;")
op.execute("DROP TABLE ab_view_menu")
op.execute("ALTER TABLE ab_view_menu_dg_tmp rename to ab_view_menu;")
op.execute("PRAGMA foreign_keys=on")
else:
op.alter_column(
table_name="ab_view_menu",
column_name="name",
type_=StringID(length=250),
nullable=False,
) |
Unapply Increase length of ``Flask-AppBuilder`` ``ab_view_menu.name`` column | def downgrade():
"""Unapply Increase length of ``Flask-AppBuilder`` ``ab_view_menu.name`` column"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if "ab_view_menu" in tables:
if conn.dialect.name == "sqlite":
op.execute("PRAGMA foreign_keys=off")
op.execute(
"""
CREATE TABLE IF NOT EXISTS ab_view_menu_dg_tmp
(
id INTEGER NOT NULL PRIMARY KEY,
name VARCHAR(100) NOT NULL UNIQUE
);
"""
)
op.execute("INSERT INTO ab_view_menu_dg_tmp(id, name) select id, name from ab_view_menu;")
op.execute("DROP TABLE ab_view_menu")
op.execute("ALTER TABLE ab_view_menu_dg_tmp rename to ab_view_menu;")
op.execute("PRAGMA foreign_keys=on")
else:
op.alter_column(
table_name="ab_view_menu", column_name="name", type_=sa.String(length=100), nullable=False
) |
Return the primary and unique constraints of a table along with their column names.
Some tables, like `task_instance`, are missing an explicit primary key constraint name
because the name is auto-generated by SQL Server, so this function helps to
retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary mapping (constraint name, constraint type) to the list of column names | def get_table_constraints(conn, table_name) -> dict[tuple[str, str], list[str]]:
"""
Return the primary and unique constraints of a table along with their column names.
Some tables, like `task_instance`, are missing an explicit primary key constraint name
because the name is auto-generated by SQL Server, so this function helps to
retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary mapping (constraint name, constraint type) to the list of column names
"""
query = text(
f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
WHERE tc.TABLE_NAME = '{table_name}' AND
(tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE')
"""
)
result = conn.execute(query).fetchall()
constraint_dict = defaultdict(list)
for constraint, constraint_type, column in result:
constraint_dict[(constraint, constraint_type)].append(column)
return constraint_dict |
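To make the return shape concrete, here is a sketch of what the helper might yield for a hypothetical table with a composite primary key and a unique constraint (constraint and column names are invented for illustration):
# Hypothetical shape of the returned mapping (names invented, not real constraints):
example_constraint_dict = {
    ("PK__some_tab__ABC123", "PRIMARY KEY"): ["dag_id", "task_id", "execution_date"],
    ("uq_some_table_name", "UNIQUE"): ["name"],
}
# i.e. each key is (constraint name, constraint type) and each value lists the columns it covers.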
Drop the primary key or unique constraints that reference the given column.
:param operator: batch_alter_table for the table
:param column_name: name of the column whose constraints should be dropped
:param constraint_dict: a dictionary mapping (constraint name, constraint type) to the list of column names | def drop_column_constraints(operator, column_name, constraint_dict):
"""
Drop the primary key or unique constraints that reference the given column.
:param operator: batch_alter_table for the table
:param column_name: name of the column whose constraints should be dropped
:param constraint_dict: a dictionary mapping (constraint name, constraint type) to the list of column names
"""
for constraint, columns in constraint_dict.items():
if column_name in columns:
if constraint[1].lower().startswith("primary"):
operator.drop_constraint(constraint[0], type_="primary")
elif constraint[1].lower().startswith("unique"):
operator.drop_constraint(constraint[0], type_="unique") |
Recreate the primary key or unique constraints that reference the given column.
:param operator: batch_alter_table for the table
:param column_name: name of the column whose constraints should be recreated
:param constraint_dict: a dictionary mapping (constraint name, constraint type) to the list of column names | def create_constraints(operator, column_name, constraint_dict):
"""
Recreate the primary key or unique constraints that reference the given column.
:param operator: batch_alter_table for the table
:param column_name: name of the column whose constraints should be recreated
:param constraint_dict: a dictionary mapping (constraint name, constraint type) to the list of column names
"""
for constraint, columns in constraint_dict.items():
if column_name in columns:
if constraint[1].lower().startswith("primary"):
operator.create_primary_key(constraint_name=constraint[0], columns=columns)
elif constraint[1].lower().startswith("unique"):
operator.create_unique_constraint(constraint_name=constraint[0], columns=columns) |
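Taken together, the three helpers above support the usual MSSQL pattern: look up the constraints that touch a column, drop them, alter the column, then recreate them. A minimal sketch, assuming a hypothetical migration that widens task_instance.pool (the table, column, and new length are illustrative only):
# Hypothetical composition of the helpers above inside a migration (MSSQL only).
import sqlalchemy as sa
from alembic import op

def widen_pool_column():
    conn = op.get_bind()
    if conn.dialect.name == "mssql":
        constraint_dict = get_table_constraints(conn, "task_instance")
        with op.batch_alter_table("task_instance") as batch_op:
            drop_column_constraints(operator=batch_op, column_name="pool", constraint_dict=constraint_dict)
            batch_op.alter_column("pool", type_=sa.String(length=256), nullable=False)
            create_constraints(operator=batch_op, column_name="pool", constraint_dict=constraint_dict)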
Apply Remove id column from xcom | def upgrade():
"""Apply Remove id column from xcom"""
conn = op.get_bind()
inspector = inspect(conn)
with op.batch_alter_table("xcom") as bop:
xcom_columns = [col.get("name") for col in inspector.get_columns("xcom")]
if "id" in xcom_columns:
if conn.dialect.name == "mssql":
constraint_dict = get_table_constraints(conn, "xcom")
drop_column_constraints(operator=bop, column_name="id", constraint_dict=constraint_dict)
bop.drop_column("id")
bop.drop_index("idx_xcom_dag_task_date")
# mssql doesn't allow primary keys with nullable columns
if conn.dialect.name != "mssql":
bop.create_primary_key("pk_xcom", ["dag_id", "task_id", "key", "execution_date"]) |
Unapply Remove id column from xcom | def downgrade():
"""Unapply Remove id column from xcom"""
conn = op.get_bind()
with op.batch_alter_table("xcom") as bop:
if conn.dialect.name != "mssql":
bop.drop_constraint("pk_xcom", type_="primary")
bop.add_column(Column("id", Integer, nullable=False))
bop.create_primary_key("id", ["id"])
bop.create_index("idx_xcom_dag_task_date", ["dag_id", "task_id", "key", "execution_date"]) |
Increase column length of pool name from 50 to 256 characters | def upgrade():
"""Increase column length of pool name from 50 to 256 characters"""
# use batch_alter_table to support SQLite workaround
with op.batch_alter_table("slot_pool", table_args=sa.UniqueConstraint("pool")) as batch_op:
batch_op.alter_column("pool", type_=sa.String(256, **COLLATION_ARGS)) |
Revert increased length of pool name from 256 back to 50 characters | def downgrade():
"""Revert Increased length of pool name from 256 to 50 characters"""
with op.batch_alter_table("slot_pool", table_args=sa.UniqueConstraint("pool")) as batch_op:
batch_op.alter_column("pool", type_=sa.String(50)) |
Apply Add ``run_type`` column in ``dag_run`` table | def upgrade():
"""Apply Add ``run_type`` column in ``dag_run`` table"""
run_type_col_type = sa.String(length=50)
conn = op.get_bind()
inspector = inspect(conn)
dag_run_columns = [col.get("name") for col in inspector.get_columns("dag_run")]
if "run_type" not in dag_run_columns:
# Add nullable column
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(sa.Column("run_type", run_type_col_type, nullable=True))
# Generate run type for existing records
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=conn)
for run_type in DagRunType:
session.query(DagRun).filter(DagRun.run_id.like(f"{run_type.value}__%")).update(
{DagRun.run_type: run_type.value}, synchronize_session=False
)
session.query(DagRun).filter(DagRun.run_type.is_(None)).update(
{DagRun.run_type: DagRunType.MANUAL.value}, synchronize_session=False
)
session.commit()
# Make run_type not nullable
with op.batch_alter_table("dag_run") as batch_op:
batch_op.alter_column(
"run_type", existing_type=run_type_col_type, type_=run_type_col_type, nullable=False
) |
Unapply Add ``run_type`` column in ``dag_run`` table | def downgrade():
"""Unapply Add ``run_type`` column in ``dag_run`` table"""
op.drop_column("dag_run", "run_type") |
Apply Set ``conn_type`` as non-nullable | def upgrade():
"""Apply Set ``conn_type`` as non-nullable"""
Base = declarative_base()
class Connection(Base):
__tablename__ = "connection"
id = sa.Column(sa.Integer(), primary_key=True)
conn_id = sa.Column(sa.String(250))
conn_type = sa.Column(sa.String(500))
# Fix the conn_type of existing records that are missing it
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
# imap_default was missing its type, let's fix that up
session.query(Connection).filter_by(conn_id="imap_default", conn_type=None).update(
{Connection.conn_type: "imap"}, synchronize_session=False
)
session.commit()
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=False) |
Unapply Set ``conn_type`` as non-nullable | def downgrade():
"""Unapply Set ``conn_type`` as non-nullable"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=True) |
Apply Add unique constraint to ``conn_id`` and set it as non-nullable | def upgrade():
"""Apply Add unique constraint to ``conn_id`` and set it as non-nullable"""
try:
with op.batch_alter_table("connection") as batch_op:
batch_op.alter_column("conn_id", nullable=False, existing_type=sa.String(250, **COLLATION_ARGS))
batch_op.create_unique_constraint(constraint_name="unique_conn_id", columns=["conn_id"])
except sa.exc.IntegrityError:
raise Exception("Make sure there are no duplicate connections with the same conn_id or null values") |
Unapply Add unique constraint to ``conn_id`` and set it as non-nullable | def downgrade():
"""Unapply Add unique constraint to ``conn_id`` and set it as non-nullable"""
with op.batch_alter_table("connection") as batch_op:
batch_op.drop_constraint(constraint_name="unique_conn_id", type_="unique")
batch_op.alter_column("conn_id", nullable=True, existing_type=sa.String(250)) |
Apply Add queued by Job ID to TI | def upgrade():
"""Apply Add queued by Job ID to TI"""
with op.batch_alter_table("task_instance") as batch_op:
batch_op.add_column(sa.Column("queued_by_job_id", sa.Integer(), nullable=True)) |
Unapply Add queued by Job ID to TI | def downgrade():
"""Unapply Add queued by Job ID to TI"""
with op.batch_alter_table("task_instance") as batch_op:
batch_op.drop_column("queued_by_job_id") |
Apply Add external executor ID to TI | def upgrade():
"""Apply Add external executor ID to TI"""
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.add_column(sa.Column("external_executor_id", sa.String(length=250), nullable=True)) |
Unapply Add external executor ID to TI | def downgrade():
"""Unapply Add external executor ID to TI"""
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.drop_column("external_executor_id") |
Apply Drop ``KubeResourceVersion`` and ``KubeWorkerIdentifier`` tables | def upgrade():
"""Apply Drop ``KubeResourceVersion`` and ``KubeWorkerId``entifier tables"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if WORKER_UUID_TABLE in tables:
op.drop_table(WORKER_UUID_TABLE)
if WORKER_RESOURCEVERSION_TABLE in tables:
op.drop_table(WORKER_RESOURCEVERSION_TABLE) |
Unapply Drop ``KubeResourceVersion`` and ``KubeWorkerIdentifier`` tables | def downgrade():
"""Unapply Drop ``KubeResourceVersion`` and ``KubeWorkerId``entifier tables"""
conn = op.get_bind()
inspector = inspect(conn)
tables = inspector.get_table_names()
if WORKER_UUID_TABLE not in tables:
_add_worker_uuid_table()
if WORKER_RESOURCEVERSION_TABLE not in tables:
_add_resource_table() |
Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` | def upgrade():
"""Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG``"""
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
is_mssql = bool(conn.dialect.name == "mssql")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table("dag_run", schema=None) as batch_op:
batch_op.add_column(sa.Column("last_scheduling_decision", TIMESTAMP, nullable=True))
batch_op.create_index("idx_last_scheduling_decision", ["last_scheduling_decision"], unique=False)
batch_op.add_column(sa.Column("dag_hash", sa.String(32), nullable=True))
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.add_column(sa.Column("next_dagrun", TIMESTAMP, nullable=True))
batch_op.add_column(sa.Column("next_dagrun_create_after", TIMESTAMP, nullable=True))
# Create with nullable and no default, then ALTER to set values, to avoid table level lock
batch_op.add_column(sa.Column("concurrency", sa.Integer(), nullable=True))
batch_op.add_column(sa.Column("has_task_concurrency_limits", sa.Boolean(), nullable=True))
batch_op.create_index("idx_next_dagrun_create_after", ["next_dagrun_create_after"], unique=False)
try:
from airflow.configuration import conf
concurrency = conf.getint("core", "max_active_tasks_per_dag", fallback=16)
except: # noqa
concurrency = 16
# Set it to true here as it makes us take the slow/more complete path, and when it is next parsed by the
# DagParser it will get set to the correct value.
op.execute(
f"""
UPDATE dag SET
concurrency={concurrency},
has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()}
where concurrency IS NULL
"""
)
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.alter_column("concurrency", type_=sa.Integer(), nullable=False)
batch_op.alter_column("has_task_concurrency_limits", type_=sa.Boolean(), nullable=False)
if is_sqlite:
op.execute("PRAGMA foreign_keys=on") |
Unapply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` | def downgrade():
"""Unapply Add ``scheduling_decision`` to ``DagRun`` and ``DAG``"""
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table("dag_run", schema=None) as batch_op:
batch_op.drop_index("idx_last_scheduling_decision")
batch_op.drop_column("last_scheduling_decision")
batch_op.drop_column("dag_hash")
with op.batch_alter_table("dag", schema=None) as batch_op:
batch_op.drop_index("idx_next_dagrun_create_after")
batch_op.drop_column("next_dagrun_create_after")
batch_op.drop_column("next_dagrun")
batch_op.drop_column("concurrency")
batch_op.drop_column("has_task_concurrency_limits")
if is_sqlite:
op.execute("PRAGMA foreign_keys=on") |
Recreate RenderedTaskInstanceFields table changing timestamp to datetime2(6) when using MSSQL as
backend | def upgrade():
"""
Recreate RenderedTaskInstanceFields table changing timestamp to datetime2(6) when using MSSQL as
backend
"""
conn = op.get_bind()
if conn.dialect.name == "mssql":
json_type = sa.Text
op.drop_table(TABLE_NAME)
op.create_table(
TABLE_NAME,
sa.Column("dag_id", sa.String(length=250), nullable=False),
sa.Column("task_id", sa.String(length=250), nullable=False),
sa.Column("execution_date", mssql.DATETIME2, nullable=False),
sa.Column("rendered_fields", json_type(), nullable=False),
sa.PrimaryKeyConstraint("dag_id", "task_id", "execution_date"),
) |
Recreate RenderedTaskInstanceFields table changing datetime2(6) to timestamp when using MSSQL as
backend | def downgrade():
"""
Recreate RenderedTaskInstanceFields table changing datetime2(6) to timestamp when using MSSQL as
backend
"""
conn = op.get_bind()
if conn.dialect.name == "mssql":
json_type = sa.Text
op.drop_table(TABLE_NAME)
op.create_table(
TABLE_NAME,
sa.Column("dag_id", sa.String(length=250), nullable=False),
sa.Column("task_id", sa.String(length=250), nullable=False),
sa.Column("execution_date", sa.TIMESTAMP, nullable=False),
sa.Column("rendered_fields", json_type(), nullable=False),
sa.PrimaryKeyConstraint("dag_id", "task_id", "execution_date"),
) |