response | instruction |
---|---|
Get the operator extra links.
This includes both the built-in ones, and those that come from the providers. | def get_operator_extra_links() -> set[str]:
"""
Get the operator extra links.
This includes both the built-in ones, and those that come from the providers.
"""
_OPERATOR_EXTRA_LINKS.update(ProvidersManager().extra_links_class_names)
return _OPERATOR_EXTRA_LINKS |
Get default partial kwargs in a mapped operator.
This is used to simplify a serialized mapped operator by excluding default
values supplied in the implementation from the serialized dict. Since those
are defaults, they are automatically supplied on de-serialization, so we
don't need to store them. | def _get_default_mapped_partial() -> dict[str, Any]:
"""
Get default partial kwargs in a mapped operator.
This is used to simplify a serialized mapped operator by excluding default
values supplied in the implementation from the serialized dict. Since those
are defaults, they are automatically supplied on de-serialization, so we
don't need to store them.
"""
# Use the private _expand() method to avoid the empty kwargs check.
default = BaseOperator.partial(task_id="_")._expand(EXPAND_INPUT_EMPTY, strict=False).partial_kwargs
return BaseSerialization.serialize(default)[Encoding.VAR] |
Encode a relativedelta object. | def encode_relativedelta(var: relativedelta.relativedelta) -> dict[str, Any]:
"""Encode a relativedelta object."""
encoded = {k: v for k, v in var.__dict__.items() if not k.startswith("_") and v}
if var.weekday and var.weekday.n:
# Every n'th Friday for example
encoded["weekday"] = [var.weekday.weekday, var.weekday.n]
elif var.weekday:
encoded["weekday"] = [var.weekday.weekday]
return encoded |
Decode a relativedelta object. | def decode_relativedelta(var: dict[str, Any]) -> relativedelta.relativedelta:
"""Dencode a relativedelta object."""
if "weekday" in var:
var["weekday"] = relativedelta.weekday(*var["weekday"]) # type: ignore
return relativedelta.relativedelta(**var) |
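A quick round-trip sketch of the two relativedelta helpers above; the "2nd Friday" value and the one-day offset are illustrative choices, not taken from the source.

```python
from dateutil import relativedelta

# Hypothetical input: one day plus "the 2nd Friday" (FR.weekday == 4).
rd = relativedelta.relativedelta(days=+1, weekday=relativedelta.FR(2))
encoded = encode_relativedelta(rd)              # -> {"days": 1, "weekday": [4, 2]}
restored = decode_relativedelta(dict(encoded))  # copy, since decode mutates its argument
print(encoded)
print(restored == rd)                           # True: the round trip preserves the value
```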
Encode a Pendulum Timezone for serialization.
Airflow only supports timezone objects that implement Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``). | def encode_timezone(var: Timezone | FixedTimezone) -> str | int:
"""
Encode a Pendulum Timezone for serialization.
Airflow only supports timezone objects that implement Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``).
"""
if isinstance(var, FixedTimezone):
if var.offset == 0:
return "UTC"
return var.offset
if isinstance(var, Timezone):
return var.name
raise ValueError(
f"DAG timezone should be a pendulum.tz.Timezone, not {var!r}. "
f"See {get_docs_url('timezone.html#time-zone-aware-dags')}"
) |
Decode a previously serialized Pendulum Timezone. | def decode_timezone(var: str | int) -> Timezone | FixedTimezone:
"""Decode a previously serialized Pendulum Timezone."""
return parse_timezone(var) |
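A short round-trip sketch for the two timezone helpers; ``pendulum`` is assumed to be importable here, as it is elsewhere in this module, and the zone name is just an example.

```python
import pendulum

tz = pendulum.timezone("Europe/Zurich")
encoded = encode_timezone(tz)        # the IANA name: "Europe/Zurich"
print(decode_timezone(encoded))

# Fixed offsets are encoded as an integer offset in seconds; UTC is
# special-cased to the string "UTC" so it round-trips as a real UTC zone.
```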
Encode a timetable instance.
This delegates most of the serialization work to the type, so the behavior
can be completely controlled by a custom subclass.
:meta private: | def encode_timetable(var: Timetable) -> dict[str, Any]:
"""
Encode a timetable instance.
This delegates most of the serialization work to the type, so the behavior
can be completely controlled by a custom subclass.
:meta private:
"""
timetable_class = type(var)
importable_string = qualname(timetable_class)
if _get_registered_timetable(importable_string) is None:
raise _TimetableNotRegistered(importable_string)
return {Encoding.TYPE: importable_string, Encoding.VAR: var.serialize()} |
Decode a previously serialized timetable.
Most of the deserialization logic is delegated to the actual type, which
we import from string.
:meta private: | def decode_timetable(var: dict[str, Any]) -> Timetable:
"""
Decode a previously serialized timetable.
Most of the deserialization logic is delegated to the actual type, which
we import from string.
:meta private:
"""
importable_string = var[Encoding.TYPE]
timetable_class = _get_registered_timetable(importable_string)
if timetable_class is None:
raise _TimetableNotRegistered(importable_string)
return timetable_class.deserialize(var[Encoding.VAR]) |
Encode a priority weight strategy instance.
In this version, we only store the importable string, so the class should not expect
any parameters to be passed to it. If you need to store the parameters, you
should store them in the class itself. | def encode_priority_weight_strategy(var: PriorityWeightStrategy) -> str:
"""
Encode a priority weight strategy instance.
In this version, we only store the importable string, so the class should not expect
any parameters to be passed to it. If you need to store the parameters, you
should store them in the class itself.
"""
priority_weight_strategy_class = type(var)
if priority_weight_strategy_class in airflow_priority_weight_strategies_classes:
return airflow_priority_weight_strategies_classes[priority_weight_strategy_class]
importable_string = qualname(priority_weight_strategy_class)
if _get_registered_priority_weight_strategy(importable_string) is None:
raise _PriorityWeightStrategyNotRegistered(importable_string)
return importable_string |
Decode a previously serialized priority weight strategy.
In this version, we only store the importable string, so we just need to get the class
from the dictionary of registered classes and instantiate it with no parameters. | def decode_priority_weight_strategy(var: str) -> PriorityWeightStrategy:
"""
Decode a previously serialized priority weight strategy.
In this version, we only store the importable string, so we just need to get the class
from the dictionary of registered classes and instantiate it with no parameters.
"""
priority_weight_strategy_class = _get_registered_priority_weight_strategy(var)
if priority_weight_strategy_class is None:
raise _PriorityWeightStrategyNotRegistered(var)
return priority_weight_strategy_class() |
Encode a Pendulum Timezone for serialization.
Airflow only supports timezone objects that implement Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``). | def serialize(o: object) -> tuple[U, str, int, bool]:
"""Encode a Pendulum Timezone for serialization.
Airflow only supports timezone objects that implement Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``).
"""
from pendulum.tz.timezone import FixedTimezone
name = qualname(o)
if isinstance(o, FixedTimezone):
if o.offset == 0:
return "UTC", name, __version__, True
return o.offset, name, __version__, True
tz_name = _get_tzinfo_name(cast(datetime.tzinfo, o))
if tz_name is not None:
return tz_name, name, __version__, True
if cast(datetime.tzinfo, o).utcoffset(None) == datetime.timedelta(0):
return "UTC", qualname(FixedTimezone), __version__, True
return "", "", 0, False |
Validate and load a priority weight strategy.
Returns the priority weight strategy if it is valid, otherwise raises an exception.
:param priority_weight_strategy: The priority weight strategy to validate and load.
:meta private: | def validate_and_load_priority_weight_strategy(
priority_weight_strategy: str | PriorityWeightStrategy | None,
) -> PriorityWeightStrategy:
"""Validate and load a priority weight strategy.
Returns the priority weight strategy if it is valid, otherwise raises an exception.
:param priority_weight_strategy: The priority weight strategy to validate and load.
:meta private:
"""
from airflow.serialization.serialized_objects import _get_registered_priority_weight_strategy
from airflow.utils.module_loading import qualname
if priority_weight_strategy is None:
return _AbsolutePriorityWeightStrategy()
if isinstance(priority_weight_strategy, str):
if priority_weight_strategy in airflow_priority_weight_strategies:
return airflow_priority_weight_strategies[priority_weight_strategy]()
priority_weight_strategy_class = priority_weight_strategy
else:
priority_weight_strategy_class = qualname(priority_weight_strategy)
loaded_priority_weight_strategy = _get_registered_priority_weight_strategy(priority_weight_strategy_class)
if loaded_priority_weight_strategy is None:
raise AirflowException(f"Unknown priority strategy {priority_weight_strategy_class}")
return loaded_priority_weight_strategy() |
Get the task runner that can be used to run with the given job runner.
:param local_task_job_runner: The LocalTaskJobRunner associated with the TaskInstance
that needs to be executed.
:return: The task runner to use to run the task. | def get_task_runner(local_task_job_runner: LocalTaskJobRunner) -> BaseTaskRunner:
"""
Get the task runner that can be used to run with the given job runner.
:param local_task_job_runner: The LocalTaskJobRunner associated with the TaskInstance
that needs to be executed.
:return: The task runner to use to run the task.
"""
if _TASK_RUNNER_NAME in CORE_TASK_RUNNERS:
log.debug("Loading core task runner: %s", _TASK_RUNNER_NAME)
task_runner_class = import_string(CORE_TASK_RUNNERS[_TASK_RUNNER_NAME])
else:
log.debug("Loading task runner from custom path: %s", _TASK_RUNNER_NAME)
try:
task_runner_class = import_string(_TASK_RUNNER_NAME)
except ImportError:
raise AirflowConfigException(
f'The task runner could not be loaded. Please check "task_runner" key in "core" section. '
f'Current value: "{_TASK_RUNNER_NAME}".'
)
task_runner = task_runner_class(local_task_job_runner)
return task_runner |
Check whether the given cron runs at least once an hour.
This indicates whether we need to implement a workaround for (what I call)
the "fold hour problem". Folding happens when a region switches time
backwards, usually as a part of ending a DST period, causing a block of time
to occur twice in the wall clock. This is indicated by the ``fold`` flag on
datetime.
As an example, Switzerland in 2023 ended DST at 3am (wall clock time, UTC+2)
by dialing back the clock to 2am (UTC+1). So for (say) ``30 * * * *``, if
the last run was 2:30am (UTC+2), the next needs to be 2:30am (UTC+1, folded)
instead of 3:30am.
While this technically happens for all cron schedules (in such a timezone),
we only care about schedules that create at least one run every hour, and
can provide a somewhat reasonable rationale to skip the fold hour for things
such as ``*/2`` (every two hours). Therefore, we try to *minimally* peek into
croniter internals to work around the issue.
The check is simple since croniter internally normalizes things to ``*``.
More edge cases can be added later as needed.
See also: https://github.com/kiorky/croniter/issues/56. | def _covers_every_hour(cron: croniter) -> bool:
"""Check whether the given cron runs at least once an hour.
This indicates whether we need to implement a workaround for (what I call)
the "fold hour problem". Folding happens when a region switches time
backwards, usually as a part of ending a DST period, causing a block of time
to occur twice in the wall clock. This is indicated by the ``fold`` flag on
datetime.
As an example, Switzerland in 2023 ended DST at 3am (wall clock time, UTC+2)
by dialing back the clock to 2am (UTC+1). So for (say) ``30 * * * *``, if
the last run was 2:30am (UTC+2), the next needs to be 2:30am (UTC+1, folded)
instead of 3:30am.
While this technically happens for all cron schedules (in such a timezone),
we only care about schedules that create at least one run every hour, and
can provide a somewhat reasonable rationale to skip the fold hour for things
such as ``*/2`` (every two hours). Therefore, we try to *minimally* peek into
croniter internals to work around the issue.
The check is simple since croniter internally normalizes things to ``*``.
More edge cases can be added later as needed.
See also: https://github.com/kiorky/croniter/issues/56.
"""
return cron.expanded[1] == ["*"] |
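A small illustration of the check above against croniter's normalized fields (``croniter`` is assumed importable, as it is for the function itself):

```python
from croniter import croniter

print(_covers_every_hour(croniter("30 * * * *")))   # True  -- the hour field stays "*"
print(_covers_every_hour(croniter("0 */2 * * *")))  # False -- the hour field expands to [0, 2, 4, ...]
```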
Build metrics dict from function args.
It assumes that the function arguments come from an airflow.bin.cli module function
and include a Namespace instance that optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics | def _build_metrics(func_name, namespace):
"""
Build metrics dict from function args.
It assumes that the function arguments come from an airflow.bin.cli module function
and include a Namespace instance that optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
sub_commands_to_check = {"users", "connections"}
sensitive_fields = {"-p", "--password", "--conn-password"}
full_command = list(sys.argv)
sub_command = full_command[1] if len(full_command) > 1 else None
if sub_command in sub_commands_to_check:
for idx, command in enumerate(full_command):
if command in sensitive_fields:
# For cases when password is passed as "--password xyz" (with space between key and value)
full_command[idx + 1] = "*" * 8
else:
# For cases when password is passed as "--password=xyz" (with '=' between key and value)
for sensitive_field in sensitive_fields:
if command.startswith(f"{sensitive_field}="):
full_command[idx] = f'{sensitive_field}={"*" * 8}'
metrics = {
"sub_command": func_name,
"start_datetime": timezone.utcnow(),
"full_command": f"{full_command}",
"user": getuser(),
}
if not isinstance(namespace, Namespace):
raise ValueError(
f"namespace argument should be argparse.Namespace instance, but is {type(namespace)}"
)
tmp_dic = vars(namespace)
metrics["dag_id"] = tmp_dic.get("dag_id")
metrics["task_id"] = tmp_dic.get("task_id")
metrics["execution_date"] = tmp_dic.get("execution_date")
metrics["host_name"] = socket.gethostname()
return metrics |
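The sensitive-field masking above can be hard to follow from the loop alone; here is a standalone sketch of the same two masking rules applied to a hypothetical ``connections add`` command (the argv values are made up):

```python
argv = ["airflow", "connections", "add", "my_conn", "--conn-password", "hunter2", "--conn-password=hunter2"]
sensitive_fields = {"-p", "--password", "--conn-password"}

for idx, token in enumerate(argv):
    if token in sensitive_fields:
        # "--conn-password hunter2" form: mask the following value
        argv[idx + 1] = "*" * 8
    else:
        # "--conn-password=hunter2" form: mask the value after "="
        for field in sensitive_fields:
            if token.startswith(f"{field}="):
                argv[idx] = f'{field}={"*" * 8}'

print(argv)  # both password values are replaced with ********
```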
Expand path to absolute by replacing 'DAGS_FOLDER', '~', '.', etc. | def process_subdir(subdir: str | None):
"""Expand path to absolute by replacing 'DAGS_FOLDER', '~', '.', etc."""
if subdir:
if not settings.DAGS_FOLDER:
raise ValueError("DAGS_FOLDER variable in settings should be filled.")
subdir = subdir.replace("DAGS_FOLDER", settings.DAGS_FOLDER)
subdir = os.path.abspath(os.path.expanduser(subdir))
return subdir |
Return DAG of a given dag_id by looking up file location. | def get_dag_by_file_location(dag_id: str):
"""Return DAG of a given dag_id by looking up file location."""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id] |
Search for the file referenced at fileloc.
By the time we get to this function, we've already run this `val` through `process_subdir`
and loaded the DagBag there and came up empty. So here, if `val` is a file path, we make
a last ditch effort to try and find a dag file with the same name in our dags folder. (This
avoids the unnecessary dag parsing that would occur if we just parsed the dags folder).
If `val` is a path to a file, this likely means that the serializing process had a dags_folder
equal to only the dag file in question. This prevents us from determining the relative location.
And if the paths are different between worker and dag processor / scheduler, then we won't find
the dag at the given location. | def _search_for_dag_file(val: str | None) -> str | None:
"""
Search for the file referenced at fileloc.
By the time we get to this function, we've already run this `val` through `process_subdir`
and loaded the DagBag there and came up empty. So here, if `val` is a file path, we make
a last ditch effort to try and find a dag file with the same name in our dags folder. (This
avoids the unnecessary dag parsing that would occur if we just parsed the dags folder).
If `val` is a path to a file, this likely means that the serializing process had a dags_folder
equal to only the dag file in question. This prevents us from determining the relative location.
And if the paths are different between worker and dag processor / scheduler, then we won't find
the dag at the given location.
"""
if val and Path(val).suffix in (".zip", ".py"):
matches = list(Path(settings.DAGS_FOLDER).rglob(Path(val).name))
if len(matches) == 1:
return matches[0].as_posix()
return None |
Return DAG of a given dag_id.
First we'll try to use the given subdir. If that doesn't work, we'll try to
find the correct path (assuming it's a file) and failing that, use the configured
dags folder. | def get_dag(subdir: str | None, dag_id: str, from_db: bool = False) -> DAG:
"""
Return DAG of a given dag_id.
First we'll try to use the given subdir. If that doesn't work, we'll try to
find the correct path (assuming it's a file) and failing that, use the configured
dags folder.
"""
from airflow.models import DagBag
if from_db:
dagbag = DagBag(read_dags_from_db=True)
dag = dagbag.get_dag(dag_id) # get_dag loads from the DB as requested
else:
first_path = process_subdir(subdir)
dagbag = DagBag(first_path)
dag = dagbag.dags.get(dag_id) # avoids db calls made in get_dag
if not dag:
if from_db:
raise AirflowException(f"Dag {dag_id!r} could not be found in DagBag read from database.")
fallback_path = _search_for_dag_file(subdir) or settings.DAGS_FOLDER
logger.warning("Dag %r not found in path %s; trying path %s", dag_id, first_path, fallback_path)
dagbag = DagBag(dag_folder=fallback_path)
dag = dagbag.get_dag(dag_id)
if not dag:
raise AirflowException(
f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
return dag |
Return DAG(s) matching a given regex or dag_id. | def get_dags(subdir: str | None, dag_id: str, use_regex: bool = False):
"""Return DAG(s) matching a given regex or dag_id."""
from airflow.models import DagBag
if not use_regex:
return [get_dag(subdir, dag_id)]
dagbag = DagBag(process_subdir(subdir))
matched_dags = [dag for dag in dagbag.dags.values() if re2.search(dag_id, dag.dag_id)]
if not matched_dags:
raise AirflowException(
f"dag_id could not be found with regex: {dag_id}. Either the dag did not exist or "
f"it failed to parse."
)
return matched_dags |
Fetch DAG from the database using pickling. | def get_dag_by_pickle(pickle_id: int, session: Session = NEW_SESSION) -> DAG:
"""Fetch DAG from the database using pickling."""
from airflow.models import DagPickle
dag_pickle = session.scalar(select(DagPickle).where(DagPickle.id == pickle_id).limit(1))
if not dag_pickle:
raise AirflowException(f"pickle_id could not be found in DagPickle.id list: {pickle_id}")
pickle_dag = dag_pickle.pickle
return pickle_dag |
Create logging paths. | def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
"""Create logging paths."""
if not stderr:
stderr = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.err")
if not stdout:
stdout = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.out")
if not log:
log = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.log")
if not pid:
pid = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process}.pid")
else:
pid = os.path.abspath(pid)
return pid, stdout, stderr, log |
Create log file handler for daemon process. | def setup_logging(filename):
"""Create log file handler for daemon process."""
root = logging.getLogger()
handler = NonCachingFileHandler(filename)
formatter = logging.Formatter(settings.SIMPLE_LOG_FORMAT)
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(settings.LOGGING_LEVEL)
return handler.stream |
Return without error on SIGINT or SIGTERM signals in interactive command mode.
e.g. CTRL+C or kill <PID> | def sigint_handler(sig, frame):
"""
Return without error on SIGINT or SIGTERM signals in interactive command mode.
e.g. CTRL+C or kill <PID>
"""
sys.exit(0) |
Help debug deadlocks by printing stacktraces when this gets a SIGQUIT.
e.g. kill -s QUIT <PID> or CTRL+\ | def sigquit_handler(sig, frame):
"""
Help debug deadlocks by printing stacktraces when this gets a SIGQUIT.
e.g. kill -s QUIT <PID> or CTRL+\\
"""
print(f"Dumping stack traces for all threads in PID {os.getpid()}")
id_to_name = {th.ident: th.name for th in threading.enumerate()}
code = []
for thread_id, stack in sys._current_frames().items():
code.append(f"\n# Thread: {id_to_name.get(thread_id, '')}({thread_id})")
for filename, line_number, name, line in traceback.extract_stack(stack):
code.append(f'File: "{filename}", line {line_number}, in {name}')
if line:
code.append(f" {line.strip()}")
print("\n".join(code)) |
Process arguments and decide whether to enable color in output. | def should_use_colors(args) -> bool:
"""Process arguments and decide whether to enable color in output."""
if args.color == ColorMode.ON:
return True
if args.color == ColorMode.OFF:
return False
return is_terminal_support_colors() |
Suppress logging and warning messages in cli functions. | def suppress_logs_and_warning(f: T) -> T:
"""Suppress logging and warning messages in cli functions."""
@functools.wraps(f)
def _wrapper(*args, **kwargs):
_check_cli_args(args)
if args[0].verbose:
f(*args, **kwargs)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.disable(logging.CRITICAL)
try:
f(*args, **kwargs)
finally:
# logging output again depends on the effective
# levels of individual loggers
logging.disable(logging.NOTSET)
return cast(T, _wrapper) |
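A minimal usage sketch for the decorator above, assuming the wrapped CLI handler receives an ``argparse.Namespace`` with a ``verbose`` attribute as its first positional argument (``my_command`` is hypothetical):

```python
from argparse import Namespace

@suppress_logs_and_warning
def my_command(args):
    print("command output")  # still printed; only logging and warnings are silenced

my_command(Namespace(verbose=False))  # logging disabled while the command runs
my_command(Namespace(verbose=True))   # logging left untouched
```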
Register an additional action_logger callback for pre-execution.
This callback is expected to be called with keyword args.
For more about the arguments that are passed to the callback,
refer to airflow.utils.cli.action_logging().
:param action_logger: An action logger function
:return: None | def register_pre_exec_callback(action_logger):
"""Register more action_logger function callback for pre-execution.
This function callback is expected to be called with keyword args.
For more about the arguments that is being passed to the callback,
refer to airflow.utils.cli.action_logging().
:param action_logger: An action logger function
:return: None
"""
logger.debug("Adding %s to pre execution callback", action_logger)
__pre_exec_callbacks.append(action_logger) |
Register an additional action_logger callback for post-execution.
This callback is expected to be called with keyword args.
For more about the arguments that are passed to the callback,
refer to airflow.utils.cli.action_logging().
:param action_logger: An action logger function
:return: None | def register_post_exec_callback(action_logger):
"""Register more action_logger function callback for post-execution.
This function callback is expected to be called with keyword args.
For more about the arguments that is being passed to the callback,
refer to airflow.utils.cli.action_logging().
:param action_logger: An action logger function
:return: None
"""
logger.debug("Adding %s to post execution callback", action_logger)
__post_exec_callbacks.append(action_logger) |
Call callbacks before execution.
Note that any exception from a callback will be logged but won't be propagated.
:param kwargs:
:return: None | def on_pre_execution(**kwargs):
"""Call callbacks before execution.
Note that any exception from a callback will be logged but won't be propagated.
:param kwargs:
:return: None
"""
logger.debug("Calling callbacks: %s", __pre_exec_callbacks)
for callback in __pre_exec_callbacks:
try:
callback(**kwargs)
except Exception:
logger.exception("Failed on pre-execution callback using %s", callback) |
Call callbacks after execution.
As it's being called after execution, it can capture the status of execution,
duration, etc. Note that any exception from a callback will be logged but
won't be propagated.
:param kwargs:
:return: None | def on_post_execution(**kwargs):
"""Call callbacks after execution.
As it's being called after execution, it can capture the status of execution,
duration, etc. Note that any exception from a callback will be logged but
won't be propagated.
:param kwargs:
:return: None
"""
logger.debug("Calling callbacks: %s", __post_exec_callbacks)
for callback in __post_exec_callbacks:
try:
callback(**kwargs)
except Exception:
logger.exception("Failed on post-execution callback using %s", callback) |
Behave similarly to ``action_logging``; default action logger callback.
The difference is this function uses the global ORM session, and pushes a
``Log`` row into the database instead of actually logging. | def default_action_log(sub_command, user, task_id, dag_id, execution_date, host_name, full_command, **_):
"""
Behave similarly to ``action_logging``; default action logger callback.
The difference is this function uses the global ORM session, and pushes a
``Log`` row into the database instead of actually logging.
"""
_default_action_log_internal(
sub_command=sub_command,
user=user,
task_id=task_id,
dag_id=dag_id,
execution_date=execution_date,
host_name=host_name,
full_command=full_command,
) |
RPC portion of default_action_log.
To use RPC, we need to accept a session, which is provided by the RPC call handler.
But, the action log callback system may already be forwarding a session, so to avoid
a collision, I have made this internal function instead of making default_action_log
an RPC function. | def _default_action_log_internal(
*,
sub_command,
user,
task_id,
dag_id,
execution_date,
host_name,
full_command,
session: Session = NEW_SESSION,
):
"""
RPC portion of default_action_log.
To use RPC, we need to accept a session, which is provided by the RPC call handler.
But, the action log callback system may already be forwarding a session, so to avoid
a collision, I have made this internal function instead of making default_action_log
an RPC function.
"""
from sqlalchemy.exc import OperationalError, ProgrammingError
from airflow.models.log import Log
from airflow.utils import timezone
try:
# Use bulk_insert_mappings here to avoid importing all models (which using the classes does) early
# on in the CLI
session.bulk_insert_mappings(
Log,
[
{
"event": f"cli_{sub_command}",
"task_instance": None,
"owner": user,
"extra": json.dumps({"host_name": host_name, "full_command": full_command}),
"task_id": task_id,
"dag_id": dag_id,
"execution_date": execution_date,
"dttm": timezone.utcnow(),
}
],
)
session.commit()
except (OperationalError, ProgrammingError) as e:
expected = [
'"log" does not exist', # postgres
"no such table", # sqlite
"log' doesn't exist", # mysql
]
error_is_ok = e.args and any(x in e.args[0] for x in expected)
if not error_is_ok:
logger.warning("Failed to log action %s", e)
session.rollback()
except Exception as e:
logger.warning("Failed to log action %s", e)
session.rollback() |
Get Python source (or not), preventing exceptions. | def get_python_source(x: Any) -> str | None:
"""Get Python source (or not), preventing exceptions."""
if isinstance(x, str):
return x
if x is None:
return None
source_code = None
if isinstance(x, functools.partial):
source_code = inspect.getsource(x.func)
if source_code is None:
try:
source_code = inspect.getsource(x)
except TypeError:
pass
if source_code is None:
try:
source_code = inspect.getsource(x.__call__)
except (TypeError, AttributeError):
pass
if source_code is None:
source_code = f"No source code available for {type(x)}"
return source_code |
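A short sketch of the fallbacks above, assuming it runs from a file on disk so ``inspect`` can find source; ``_scale`` is a hypothetical module-level function used only for illustration:

```python
import functools

def _scale(x, factor):
    return x * factor

print(get_python_source(_scale))                        # source of _scale
print(get_python_source(functools.partial(_scale, 2)))  # unwraps the partial to _scale's source
print(get_python_source(None))                          # None
print(get_python_source(42))                            # "No source code available for <class 'int'>"
```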
Prepare code snippet with line numbers and a specific line marked.
:param file_path: File name
:param line_no: Line number
:param context_lines_count: The number of context lines included before and after the marked line.
:return: str | def prepare_code_snippet(file_path: str, line_no: int, context_lines_count: int = 5) -> str:
"""
Prepare code snippet with line numbers and a specific line marked.
:param file_path: File name
:param line_no: Line number
:param context_lines_count: The number of context lines included before and after the marked line.
:return: str
"""
code_lines = Path(file_path).read_text().splitlines()
# Prepend line number
code_lines = [
f">{lno:3} | {line}" if line_no == lno else f"{lno:4} | {line}"
for lno, line in enumerate(code_lines, 1)
]
# Cut out the snippet
start_line_no = max(0, line_no - context_lines_count - 1)
end_line_no = line_no + context_lines_count
code_lines = code_lines[start_line_no:end_line_no]
# Join lines
code = "\n".join(code_lines)
return code |
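A usage sketch for the snippet helper above; the path and line number are illustrative:

```python
snippet = prepare_code_snippet("/opt/airflow/dags/example_dag.py", line_no=12, context_lines_count=2)
print(snippet)
# Lines 10-14 are shown with line numbers, and line 12 is prefixed with ">" as the marked line.
```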
Return the best formatter available in the current terminal. | def get_terminal_formatter(**opts):
"""Return the best formatter available in the current terminal."""
if "256" in os.environ.get("TERM", ""):
from pygments.formatters.terminal256 import Terminal256Formatter
formatter = Terminal256Formatter(**opts)
else:
from pygments.formatters.terminal import TerminalFormatter
formatter = TerminalFormatter(**opts)
return formatter |
Uncompress gz and bz2 files. | def uncompress_file(input_file_name, file_extension, dest_dir):
"""Uncompress gz and bz2 files."""
if file_extension.lower() not in (".gz", ".bz2"):
raise NotImplementedError(
f"Received {file_extension} format. Only gz and bz2 files can currently be uncompressed."
)
if file_extension.lower() == ".gz":
fmodule = gzip.GzipFile
elif file_extension.lower() == ".bz2":
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode="rb") as f_compressed, NamedTemporaryFile(
dir=dest_dir, mode="wb", delete=False
) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name |
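A small sketch exercising the gzip branch above; the paths are illustrative and a temporary directory holds both the input and the output:

```python
import gzip
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
gz_path = os.path.join(tmp_dir, "sample.csv.gz")
with gzip.open(gz_path, "wb") as f:
    f.write(b"a,b\n1,2\n")

plain_path = uncompress_file(gz_path, ".gz", tmp_dir)
with open(plain_path, "rb") as f:
    print(f.read())  # b'a,b\n1,2\n'
```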
Return a path for a temporary file including a full copy of the configuration settings.
:param include_env: Should the value of configuration from ``AIRFLOW__``
environment variables be included or not
:param include_cmds: Should the result of calling any *_cmd config be
set (True, default), or should the _cmd options be left as the
command to run (False)
:return: a path to a temporary file | def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):
"""
Return a path for a temporary file including a full copy of the configuration settings.
:param include_env: Should the value of configuration from ``AIRFLOW__``
environment variables be included or not
:param include_cmds: Should the result of calling any *_cmd config be
set (True, default), or should the _cmd options be left as the
command to run (False)
:return: a path to a temporary file
"""
cfg_dict = conf.as_dict(
display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env
)
temp_fd, cfg_path = mkstemp()
with os.fdopen(temp_fd, "w") as temp_file:
# Set the permissions before we write anything to it.
if chmod is not None and not IS_WINDOWS:
os.fchmod(temp_fd, chmod)
json.dump(cfg_dict, temp_file)
return cfg_path |
Merge parameters into an existing context.
Like ``dict.update()``, this takes the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private: | def context_merge(context: Context, *args: Any, **kwargs: Any) -> None:
"""Merge parameters into an existing context.
Like ``dict.update()``, this takes the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
context.update(*args, **kwargs) |
Update context after task unmapping.
Since ``get_template_context()`` is called before unmapping, the context
contains information about the mapped task. We need to do some in-place
updates to ensure the template context reflects the unmapped task instead.
:meta private: | def context_update_for_unmapped(context: Context, task: BaseOperator) -> None:
"""Update context after task unmapping.
Since ``get_template_context()`` is called before unmapping, the context
contains information about the mapped task. We need to do some in-place
updates to ensure the template context reflects the unmapped task instead.
:meta private:
"""
from airflow.models.param import process_params
context["task"] = context["ti"].task = task
context["params"] = process_params(context["dag"], task, context["dag_run"], suppress_exception=False) |
Create a context by copying items under selected keys in ``source``.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private: | def context_copy_partial(source: Context, keys: Container[str]) -> Context:
"""Create a context by copying items under selected keys in ``source``.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
new = Context({k: v for k, v in source._context.items() if k in keys})
new._deprecation_replacements = source._deprecation_replacements.copy()
return new |
Create a mapping that wraps deprecated entries in a lazy object proxy.
This further delays the deprecation warning until the entry is actually
used, instead of when it's accessed in the context. The result is useful for
passing into a callable with ``**kwargs``, which would unpack the mapping
too eagerly otherwise.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private: | def lazy_mapping_from_context(source: Context) -> Mapping[str, Any]:
"""Create a mapping that wraps deprecated entries in a lazy object proxy.
This further delays the deprecation warning until the entry is actually
used, instead of when it's accessed in the context. The result is useful for
passing into a callable with ``**kwargs``, which would unpack the mapping
too eagerly otherwise.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
if not isinstance(source, Context):
# Sometimes we are passed a plain dict (usually in tests, or in a user's
# custom operators) -- be lenient about what we accept so we don't
# break anything for users.
return source
def _deprecated_proxy_factory(k: str, v: Any) -> Any:
replacements = source._deprecation_replacements[k]
warnings.warn(_create_deprecation_warning(k, replacements), stacklevel=2)
return v
def _create_value(k: str, v: Any) -> Any:
if k not in source._deprecation_replacements:
return v
factory = functools.partial(_deprecated_proxy_factory, k, v)
return lazy_object_proxy.Proxy(factory)
return {k: _create_value(k, v) for k, v in source._context.items()} |
A wrapper function of `check_cycle` for backward compatibility purposes.
New code should use `check_cycle` instead, since this function's name `test_cycle` starts
with 'test_' and will be treated as a unit test by pytest, resulting in failures. | def test_cycle(dag: DAG) -> None:
"""
A wrapper function of `check_cycle` for backward compatibility purposes.
New code should use `check_cycle` instead, since this function's name `test_cycle` starts
with 'test_' and will be treated as a unit test by pytest, resulting in failures.
"""
from warnings import warn
warn(
"Deprecated, please use `check_cycle` at the same module instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return check_cycle(dag) |
Check to see if there are any cycles in the DAG.
:raises AirflowDagCycleException: If a cycle is found in the DAG. | def check_cycle(dag: DAG) -> None:
"""Check to see if there are any cycles in the DAG.
:raises AirflowDagCycleException: If a cycle is found in the DAG.
"""
# default of int is 0 which corresponds to CYCLE_NEW
visited: dict[str, int] = defaultdict(int)
path_stack: deque[str] = deque()
task_dict = dag.task_dict
def _check_adjacent_tasks(task_id, current_task):
"""Return first untraversed child task, else None if all tasks traversed."""
for adjacent_task in current_task.get_direct_relative_ids():
if visited[adjacent_task] == CYCLE_IN_PROGRESS:
msg = f"Cycle detected in DAG: {dag.dag_id}. Faulty task: {task_id}"
raise AirflowDagCycleException(msg)
elif visited[adjacent_task] == CYCLE_NEW:
return adjacent_task
return None
for dag_task_id in dag.task_dict.keys():
if visited[dag_task_id] == CYCLE_DONE:
continue
path_stack.append(dag_task_id)
while path_stack:
current_task_id = path_stack[-1]
if visited[current_task_id] == CYCLE_NEW:
visited[current_task_id] = CYCLE_IN_PROGRESS
task = task_dict[current_task_id]
child_to_check = _check_adjacent_tasks(current_task_id, task)
if not child_to_check:
visited[current_task_id] = CYCLE_DONE
path_stack.pop()
else:
path_stack.append(child_to_check) |
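A hedged sketch of the cycle check on a tiny DAG; ``EmptyOperator`` and the ``schedule=None`` argument assume a reasonably recent Airflow 2.x API, and the dag_id and task ids are made up:

```python
import pendulum
from airflow.models.dag import DAG
from airflow.operators.empty import EmptyOperator
from airflow.exceptions import AirflowDagCycleException

with DAG(dag_id="cycle_demo", start_date=pendulum.datetime(2024, 1, 1, tz="UTC"), schedule=None) as dag:
    a = EmptyOperator(task_id="a")
    b = EmptyOperator(task_id="b")
    a >> b
    b >> a  # relationships are accepted here; the cycle is only caught by check_cycle

try:
    check_cycle(dag)
except AirflowDagCycleException as err:
    print(err)  # e.g. "Cycle detected in DAG: cycle_demo. Faulty task: a"
```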
Create the list of edges needed to construct the Graph view.
A special case is made if a TaskGroup is immediately upstream/downstream of another
TaskGroup or task. Two proxy nodes named upstream_join_id and downstream_join_id are
created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
all edges are directed onto the proxy nodes. This is to cut down the number of edges on
the graph.
For example: A DAG with TaskGroups group1 and group2:
group1: task1, task2, task3
group2: task4, task5, task6
group2 is downstream of group1:
group1 >> group2
Edges to add (This avoids having to create edges between every task in group1 and group2):
task1 >> downstream_join_id
task2 >> downstream_join_id
task3 >> downstream_join_id
downstream_join_id >> upstream_join_id
upstream_join_id >> task4
upstream_join_id >> task5
upstream_join_id >> task6 | def dag_edges(dag: DAG):
"""
Create the list of edges needed to construct the Graph view.
A special case is made if a TaskGroup is immediately upstream/downstream of another
TaskGroup or task. Two proxy nodes named upstream_join_id and downstream_join_id are
created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
all edges are directed onto the proxy nodes. This is to cut down the number of edges on
the graph.
For example: A DAG with TaskGroups group1 and group2:
group1: task1, task2, task3
group2: task4, task5, task6
group2 is downstream of group1:
group1 >> group2
Edges to add (This avoids having to create edges between every task in group1 and group2):
task1 >> downstream_join_id
task2 >> downstream_join_id
task3 >> downstream_join_id
downstream_join_id >> upstream_join_id
upstream_join_id >> task4
upstream_join_id >> task5
upstream_join_id >> task6
"""
# Edges to add between TaskGroup
edges_to_add = set()
# Edges to remove between individual tasks that are replaced by edges_to_add.
edges_to_skip = set()
task_group_map = dag.task_group.get_task_group_dict()
def collect_edges(task_group):
"""Update edges_to_add and edges_to_skip according to TaskGroups."""
if isinstance(task_group, AbstractOperator):
return
for target_id in task_group.downstream_group_ids:
# For every TaskGroup immediately downstream, add edges between downstream_join_id
# and upstream_join_id. Skip edges between individual tasks of the TaskGroups.
target_group = task_group_map[target_id]
edges_to_add.add((task_group.downstream_join_id, target_group.upstream_join_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
for target in target_group.get_roots():
edges_to_skip.add((child.task_id, target.task_id))
edges_to_skip.add((child.task_id, target_group.upstream_join_id))
for child in target_group.get_roots():
edges_to_add.add((target_group.upstream_join_id, child.task_id))
edges_to_skip.add((task_group.downstream_join_id, child.task_id))
# For every individual task immediately downstream, add edges between downstream_join_id and
# the downstream task. Skip edges between individual tasks of the TaskGroup and the
# downstream task.
for target_id in task_group.downstream_task_ids:
edges_to_add.add((task_group.downstream_join_id, target_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
edges_to_skip.add((child.task_id, target_id))
# For every individual task immediately upstream, add edges between the upstream task
# and upstream_join_id. Skip edges between the upstream task and individual tasks
# of the TaskGroup.
for source_id in task_group.upstream_task_ids:
edges_to_add.add((source_id, task_group.upstream_join_id))
for child in task_group.get_roots():
edges_to_add.add((task_group.upstream_join_id, child.task_id))
edges_to_skip.add((source_id, child.task_id))
for child in task_group.children.values():
collect_edges(child)
collect_edges(dag.task_group)
# Collect all the edges between individual tasks
edges = set()
setup_teardown_edges = set()
tasks_to_trace: list[Operator] = dag.roots
while tasks_to_trace:
tasks_to_trace_next: list[Operator] = []
for task in tasks_to_trace:
for child in task.downstream_list:
edge = (task.task_id, child.task_id)
if task.is_setup and child.is_teardown:
setup_teardown_edges.add(edge)
if edge not in edges:
edges.add(edge)
tasks_to_trace_next.append(child)
tasks_to_trace = tasks_to_trace_next
result = []
# Build result dicts with the two ends of the edge, plus any extra metadata
# if we have it.
for source_id, target_id in sorted(edges.union(edges_to_add) - edges_to_skip):
record = {"source_id": source_id, "target_id": target_id}
label = dag.get_edge_info(source_id, target_id).get("label")
if (source_id, target_id) in setup_teardown_edges:
record["is_setup_teardown"] = True
if label:
record["label"] = label
result.append(record)
return result |
Return the current (DAG) parsing context info. | def get_parsing_context() -> AirflowParsingContext:
"""Return the current (DAG) parsing context info."""
return AirflowParsingContext(
dag_id=os.environ.get(_AIRFLOW_PARSING_CONTEXT_DAG_ID),
task_id=os.environ.get(_AIRFLOW_PARSING_CONTEXT_TASK_ID),
) |
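A typical sketch of using the parsing context inside a DAG file to skip building DAGs the current parse does not need; ``my_expensive_dag`` is a hypothetical dag_id:

```python
current_dag_id = get_parsing_context().dag_id

if current_dag_id is None or current_dag_id == "my_expensive_dag":
    # Only construct the expensive DAG when the whole folder is being parsed
    # (dag_id is None) or when this specific DAG is the parse target.
    ...
```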
Get a list of dates in the specified range, separated by delta.
.. code-block:: pycon
>>> from airflow.utils.dates import date_range
>>> from datetime import datetime, timedelta
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 2, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 3, 0, 0, tzinfo=Timezone('UTC'))]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta="0 0 * * *")
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 2, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 3, 0, 0, tzinfo=Timezone('UTC'))]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 2, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 3, 1, 0, 0, tzinfo=Timezone('UTC'))]
:param start_date: anchor date to start the series from
:param end_date: right boundary for the date range
:param num: alternatively to end_date, you can specify the number of
entries you want in the range. This number can be negative; the
output will always be sorted regardless
:param delta: step length. It can be datetime.timedelta or cron expression as string | def date_range(
start_date: datetime,
end_date: datetime | None = None,
num: int | None = None,
delta: str | timedelta | relativedelta | None = None,
) -> list[datetime]:
"""Get a list of dates in the specified range, separated by delta.
.. code-block:: pycon
>>> from airflow.utils.dates import date_range
>>> from datetime import datetime, timedelta
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 2, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 3, 0, 0, tzinfo=Timezone('UTC'))]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta="0 0 * * *")
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 2, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 1, 3, 0, 0, tzinfo=Timezone('UTC'))]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 2, 1, 0, 0, tzinfo=Timezone('UTC')),
datetime.datetime(2016, 3, 1, 0, 0, tzinfo=Timezone('UTC'))]
:param start_date: anchor date to start the series from
:param end_date: right boundary for the date range
:param num: alternatively to end_date, you can specify the number of
entries you want in the range. This number can be negative; the
output will always be sorted regardless
:param delta: step length. It can be datetime.timedelta or cron expression as string
"""
warnings.warn(
"`airflow.utils.dates.date_range()` is deprecated. Please use `airflow.timetables`.",
category=RemovedInAirflow3Warning,
stacklevel=2,
)
if not delta:
return []
if end_date:
if start_date > end_date:
raise ValueError("Wait. start_date needs to be before end_date")
if num:
raise ValueError("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
time_zone = start_date.tzinfo
abs_delta: timedelta | relativedelta
if isinstance(delta, str):
delta_iscron = True
if timezone.is_localized(start_date):
start_date = timezone.make_naive(start_date, time_zone)
cron = croniter(cron_presets.get(delta, delta), start_date)
elif isinstance(delta, timedelta):
abs_delta = abs(delta)
elif isinstance(delta, relativedelta):
abs_delta = abs(delta)
else:
raise TypeError("Wait. delta must be either datetime.timedelta or cron expression as str")
dates = []
if end_date:
if timezone.is_naive(start_date) and not timezone.is_naive(end_date):
end_date = timezone.make_naive(end_date, time_zone)
while start_date <= end_date: # type: ignore
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, time_zone))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += abs_delta
else:
num_entries: int = num # type: ignore
for _ in range(abs(num_entries)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, time_zone))
else:
dates.append(start_date)
if delta_iscron and num_entries > 0:
start_date = cron.get_next(datetime)
elif delta_iscron:
start_date = cron.get_prev(datetime)
elif num_entries > 0:
start_date += abs_delta
else:
start_date -= abs_delta
return sorted(dates) |
Return ``start_date + i * delta`` for given ``i`` where the result is closest to ``dt``.
.. code-block:: pycon
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0) | def round_time(
dt: datetime,
delta: str | timedelta | relativedelta,
start_date: datetime = timezone.make_aware(datetime.min),
):
"""Return ``start_date + i * delta`` for given ``i`` where the result is closest to ``dt``.
.. code-block:: pycon
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, str):
# It's cron based, so it's easy
time_zone = start_date.tzinfo
start_date = timezone.make_naive(start_date, time_zone)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, time_zone)
else:
return timezone.make_aware(prev, time_zone)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
# finding an upper and lower limit and then dissecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
# exceeds dt, upper // 2 is below dt and therefore forms a lower limit
# for the i we are looking for
lower = upper // 2
# We now continue to bisect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (start_date + (lower + 1) * delta) - dt <= dt - (start_date + lower * delta):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
# We bisect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate |
Determine the most appropriate time unit for given durations (in seconds).
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours' | def infer_time_unit(time_seconds_arr: Collection[float]) -> TimeUnit:
"""Determine the most appropriate time unit for given durations (in seconds).
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
"""
if not time_seconds_arr:
return "hours"
max_time_seconds = max(time_seconds_arr)
if max_time_seconds <= 60 * 2:
return "seconds"
elif max_time_seconds <= 60 * 60 * 2:
return "minutes"
elif max_time_seconds <= 24 * 60 * 60 * 2:
return "hours"
else:
return "days" |
Convert an array of time durations in seconds to the specified time unit. | def scale_time_units(time_seconds_arr: Collection[float], unit: TimeUnit) -> Collection[float]:
"""Convert an array of time durations in seconds to the specified time unit."""
if unit == "minutes":
factor = 60
elif unit == "hours":
factor = 60 * 60
elif unit == "days":
factor = 24 * 60 * 60
else:
factor = 1
return [x / factor for x in time_seconds_arr] |
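A quick sketch of the two helpers above on made-up durations:

```python
durations = [30.0, 90.0, 600.0]                 # seconds
unit = infer_time_unit(durations)               # "minutes" (max is 600s, under the 2-hour cutoff)
print(unit, scale_time_units(durations, unit))  # minutes [0.5, 1.5, 10.0]
```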
Get a datetime object representing *n* days ago.
By default the time is set to midnight. | def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""Get a datetime object representing *n* days ago.
By default the time is set to midnight.
"""
warnings.warn(
"Function `days_ago` is deprecated and will be removed in Airflow 3.0. "
"You can achieve equivalent behavior with `pendulum.today('UTC').add(days=-N, ...)`",
RemovedInAirflow3Warning,
stacklevel=2,
)
today = timezone.utcnow().replace(hour=hour, minute=minute, second=second, microsecond=microsecond)
return today - timedelta(days=n) |
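Per the deprecation message above, a pendulum-based replacement might look like this sketch (the two-day offset and the midnight default are illustrative):

```python
import pendulum

# Roughly equivalent to days_ago(2): midnight UTC, two days back.
start = pendulum.today("UTC").add(days=-2)
print(start)
```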
Parse execution date string to datetime object. | def parse_execution_date(execution_date_str):
"""Parse execution date string to datetime object."""
return timezone.parse(execution_date_str) |
Add new Connection. | def merge_conn(conn: Connection, session: Session = NEW_SESSION):
"""Add new Connection."""
if not session.scalar(select(1).where(conn.__class__.conn_id == conn.conn_id)):
session.add(conn)
session.commit() |
Add default pool if it does not exist. | def add_default_pool_if_not_exists(session: Session = NEW_SESSION):
"""Add default pool if it does not exist."""
from airflow.models.pool import Pool
if not Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session):
default_pool = Pool(
pool=Pool.DEFAULT_POOL_NAME,
slots=conf.getint(section="core", key="default_pool_task_slot_count"),
description="Default pool",
include_deferred=False,
)
session.add(default_pool)
session.commit() |
Create default Airflow connections. | def create_default_connections(session: Session = NEW_SESSION):
"""Create default Airflow connections."""
from airflow.models.connection import Connection
merge_conn(
Connection(
conn_id="airflow_db",
conn_type="mysql",
host="mysql",
login="root",
password="",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="athena_default",
conn_type="athena",
),
session,
)
merge_conn(
Connection(
conn_id="aws_default",
conn_type="aws",
),
session,
)
merge_conn(
Connection(
conn_id="azure_batch_default",
conn_type="azure_batch",
login="<ACCOUNT_NAME>",
password="",
extra="""{"account_url": "<ACCOUNT_URL>"}""",
)
)
merge_conn(
Connection(
conn_id="azure_cosmos_default",
conn_type="azure_cosmos",
extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id="azure_data_explorer_default",
conn_type="azure_data_explorer",
host="https://<CLUSTER>.kusto.windows.net",
extra="""{"auth_method": "<AAD_APP | AAD_APP_CERT | AAD_CREDS | AAD_DEVICE>",
"tenant": "<TENANT ID>", "certificate": "<APPLICATION PEM CERTIFICATE>",
"thumbprint": "<APPLICATION CERTIFICATE THUMBPRINT>"}""",
),
session,
)
merge_conn(
Connection(
conn_id="azure_data_lake_default",
conn_type="azure_data_lake",
extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }',
),
session,
)
merge_conn(
Connection(
conn_id="azure_default",
conn_type="azure",
),
session,
)
merge_conn(
Connection(
conn_id="cassandra_default",
conn_type="cassandra",
host="cassandra",
port=9042,
),
session,
)
merge_conn(
Connection(
conn_id="databricks_default",
conn_type="databricks",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="dingding_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="drill_default",
conn_type="drill",
host="localhost",
port=8047,
extra='{"dialect_driver": "drill+sadrill", "storage_plugin": "dfs"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_broker_default",
conn_type="druid",
host="druid-broker",
port=8082,
extra='{"endpoint": "druid/v2/sql"}',
),
session,
)
merge_conn(
Connection(
conn_id="druid_ingest_default",
conn_type="druid",
host="druid-overlord",
port=8081,
extra='{"endpoint": "druid/indexer/v1/task"}',
),
session,
)
merge_conn(
Connection(
conn_id="elasticsearch_default",
conn_type="elasticsearch",
host="localhost",
schema="http",
port=9200,
),
session,
)
merge_conn(
Connection(
conn_id="emr_default",
conn_type="emr",
extra="""
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"Ec2KeyName": "mykey",
"Ec2SubnetId": "somesubnet",
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Core nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
],
"TerminationProtected": false,
"KeepJobFlowAliveWhenNoSteps": false
},
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="facebook_default",
conn_type="facebook_social",
extra="""
{ "account_id": "<AD_ACCOUNT_ID>",
"app_id": "<FACEBOOK_APP_ID>",
"app_secret": "<FACEBOOK_APP_SECRET>",
"access_token": "<FACEBOOK_AD_ACCESS_TOKEN>"
}
""",
),
session,
)
merge_conn(
Connection(
conn_id="fs_default",
conn_type="fs",
extra='{"path": "/"}',
),
session,
)
merge_conn(
Connection(
conn_id="ftp_default",
conn_type="ftp",
host="localhost",
port=21,
login="airflow",
password="airflow",
extra='{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}',
),
session,
)
merge_conn(
Connection(
conn_id="google_cloud_default",
conn_type="google_cloud_platform",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hive_cli_default",
conn_type="hive_cli",
port=10000,
host="localhost",
extra='{"use_beeline": true, "auth": ""}',
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="hiveserver2_default",
conn_type="hiveserver2",
host="localhost",
schema="default",
port=10000,
),
session,
)
merge_conn(
Connection(
conn_id="http_default",
conn_type="http",
host="https://www.httpbin.org/",
),
session,
)
merge_conn(Connection(conn_id="impala_default", conn_type="impala", host="localhost", port=21050))
merge_conn(
Connection(
conn_id="kafka_default",
conn_type="kafka",
extra=json.dumps({"bootstrap.servers": "broker:29092"}),
),
session,
)
merge_conn(
Connection(
conn_id="kubernetes_default",
conn_type="kubernetes",
),
session,
)
merge_conn(
Connection(
conn_id="kylin_default",
conn_type="kylin",
host="localhost",
port=7070,
login="ADMIN",
password="KYLIN",
),
session,
)
merge_conn(
Connection(
conn_id="leveldb_default",
conn_type="leveldb",
host="localhost",
),
session,
)
merge_conn(Connection(conn_id="livy_default", conn_type="livy", host="livy", port=8998), session)
merge_conn(
Connection(
conn_id="local_mysql",
conn_type="mysql",
host="localhost",
login="airflow",
password="airflow",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="metastore_default",
conn_type="hive_metastore",
host="localhost",
extra='{"authMechanism": "PLAIN"}',
port=9083,
),
session,
)
merge_conn(Connection(conn_id="mongo_default", conn_type="mongo", host="mongo", port=27017), session)
merge_conn(
Connection(
conn_id="mssql_default",
conn_type="mssql",
host="localhost",
port=1433,
),
session,
)
merge_conn(
Connection(
conn_id="mysql_default",
conn_type="mysql",
login="root",
schema="airflow",
host="mysql",
),
session,
)
merge_conn(
Connection(
conn_id="opsgenie_default",
conn_type="http",
host="",
password="",
),
session,
)
merge_conn(
Connection(
conn_id="oracle_default",
conn_type="oracle",
host="localhost",
login="root",
password="password",
schema="schema",
port=1521,
),
session,
)
merge_conn(
Connection(
conn_id="oss_default",
conn_type="oss",
extra="""{
"auth_type": "AK",
"access_key_id": "<ACCESS_KEY_ID>",
"access_key_secret": "<ACCESS_KEY_SECRET>",
"region": "<YOUR_OSS_REGION>"}
""",
),
session,
)
merge_conn(
Connection(
conn_id="pig_cli_default",
conn_type="pig_cli",
schema="default",
),
session,
)
merge_conn(
Connection(
conn_id="pinot_admin_default",
conn_type="pinot",
host="localhost",
port=9000,
),
session,
)
merge_conn(
Connection(
conn_id="pinot_broker_default",
conn_type="pinot",
host="localhost",
port=9000,
extra='{"endpoint": "/query", "schema": "http"}',
),
session,
)
merge_conn(
Connection(
conn_id="postgres_default",
conn_type="postgres",
login="postgres",
password="airflow",
schema="airflow",
host="postgres",
),
session,
)
merge_conn(
Connection(
conn_id="presto_default",
conn_type="presto",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="qdrant_default",
conn_type="qdrant",
host="qdrant",
port=6333,
),
session,
)
merge_conn(
Connection(
conn_id="redis_default",
conn_type="redis",
host="redis",
port=6379,
extra='{"db": 0}',
),
session,
)
merge_conn(
Connection(
conn_id="redshift_default",
conn_type="redshift",
extra="""{
"iam": true,
"cluster_identifier": "<REDSHIFT_CLUSTER_IDENTIFIER>",
"port": 5439,
"profile": "default",
"db_user": "awsuser",
"database": "dev",
"region": ""
}""",
),
session,
)
merge_conn(
Connection(
conn_id="salesforce_default",
conn_type="salesforce",
login="username",
password="password",
extra='{"security_token": "security_token"}',
),
session,
)
merge_conn(
Connection(
conn_id="segment_default",
conn_type="segment",
extra='{"write_key": "my-segment-write-key"}',
),
session,
)
merge_conn(
Connection(
conn_id="sftp_default",
conn_type="sftp",
host="localhost",
port=22,
login="airflow",
extra='{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}',
),
session,
)
merge_conn(
Connection(
conn_id="spark_default",
conn_type="spark",
host="yarn",
extra='{"queue": "root.default"}',
),
session,
)
merge_conn(
Connection(
conn_id="sqlite_default",
conn_type="sqlite",
host=os.path.join(gettempdir(), "sqlite_default.db"),
),
session,
)
merge_conn(
Connection(
conn_id="ssh_default",
conn_type="ssh",
host="localhost",
),
session,
)
merge_conn(
Connection(
conn_id="tableau_default",
conn_type="tableau",
host="https://tableau.server.url",
login="user",
password="password",
extra='{"site_id": "my_site"}',
),
session,
)
merge_conn(
Connection(
conn_id="tabular_default",
conn_type="tabular",
host="https://api.tabulardata.io/ws/v1",
),
session,
)
merge_conn(
Connection(
conn_id="teradata_default",
conn_type="teradata",
host="localhost",
login="user",
password="password",
schema="schema",
),
session,
)
merge_conn(
Connection(
conn_id="trino_default",
conn_type="trino",
host="localhost",
schema="hive",
port=3400,
),
session,
)
merge_conn(
Connection(
conn_id="vertica_default",
conn_type="vertica",
host="localhost",
port=5433,
),
session,
)
merge_conn(
Connection(
conn_id="wasb_default",
conn_type="wasb",
extra='{"sas_token": null}',
),
session,
)
merge_conn(
Connection(
conn_id="webhdfs_default",
conn_type="hdfs",
host="localhost",
port=50070,
),
session,
)
merge_conn(
Connection(
conn_id="yandexcloud_default",
conn_type="yandexcloud",
schema="default",
),
session,
) |
Initialize Airflow database. | def initdb(session: Session = NEW_SESSION, load_connections: bool = True):
"""Initialize Airflow database."""
import_all_models()
db_exists = _get_current_revision(session)
if db_exists:
upgradedb(session=session)
else:
_create_db_from_orm(session=session)
if conf.getboolean("database", "LOAD_DEFAULT_CONNECTIONS") and load_connections:
create_default_connections(session=session)
# Add default pool & sync log_template
add_default_pool_if_not_exists(session=session)
synchronize_log_template(session=session) |
Wait for all airflow migrations to complete.
:param timeout: Timeout for the migration in seconds
:return: None | def check_migrations(timeout):
"""
Wait for all airflow migrations to complete.
:param timeout: Timeout for the migration in seconds
:return: None
"""
    timeout = timeout or 1  # run the loop at least once
with _configured_alembic_environment() as env:
context = env.get_context()
source_heads = None
db_heads = None
for ticker in range(timeout):
source_heads = set(env.script.get_heads())
db_heads = set(context.get_current_heads())
if source_heads == db_heads:
return
time.sleep(1)
log.info("Waiting for migrations... %s second(s)", ticker)
raise TimeoutError(
f"There are still unapplied migrations after {timeout} seconds. Migration"
f"Head(s) in DB: {db_heads} | Migration Head(s) in Source Code: {source_heads}"
) |
Check and run migrations if necessary. Only use in a tty. | def check_and_run_migrations():
"""Check and run migrations if necessary. Only use in a tty."""
with _configured_alembic_environment() as env:
context = env.get_context()
source_heads = set(env.script.get_heads())
db_heads = set(context.get_current_heads())
db_command = None
command_name = None
verb = None
if len(db_heads) < 1:
db_command = initdb
command_name = "init"
verb = "initialize"
elif source_heads != db_heads:
db_command = upgradedb
command_name = "upgrade"
verb = "upgrade"
if sys.stdout.isatty() and verb:
print()
question = f"Please confirm database {verb} (or wait 4 seconds to skip it). Are you sure? [y/N]"
try:
answer = helpers.prompt_with_timeout(question, timeout=4, default=False)
if answer:
try:
db_command()
print(f"DB {verb} done")
except Exception as error:
from airflow.version import version
print(error)
print(
"You still have unapplied migrations. "
f"You may need to {verb} the database by running `airflow db {command_name}`. ",
f"Make sure the command is run using Airflow version {version}.",
file=sys.stderr,
)
sys.exit(1)
except AirflowException:
pass
elif source_heads != db_heads:
from airflow.version import version
print(
f"ERROR: You need to {verb} the database. Please run `airflow db {command_name}`. "
f"Make sure the command is run using Airflow version {version}.",
file=sys.stderr,
)
sys.exit(1) |
Synchronize log template configs with table.
This checks if the last row fully matches the current config values, and
inserts a new row if not. | def synchronize_log_template(*, session: Session = NEW_SESSION) -> None:
"""Synchronize log template configs with table.
This checks if the last row fully matches the current config values, and
    inserts a new row if not.
"""
# NOTE: SELECT queries in this function are INTENTIONALLY written with the
# SQL builder style, not the ORM query API. This avoids configuring the ORM
# unless we need to insert something, speeding up CLI in general.
from airflow.models.tasklog import LogTemplate
metadata = reflect_tables([LogTemplate], session)
log_template_table: Table | None = metadata.tables.get(LogTemplate.__tablename__)
if log_template_table is None:
log.info("Log template table does not exist (added in 2.3.0); skipping log template sync.")
return
filename = conf.get("logging", "log_filename_template")
elasticsearch_id = conf.get("elasticsearch", "log_id_template")
stored = session.execute(
select(
log_template_table.c.filename,
log_template_table.c.elasticsearch_id,
)
.order_by(log_template_table.c.id.desc())
.limit(1)
).first()
# If we have an empty table, and the default values exist, we will seed the
# table with values from pre 2.3.0, so old logs will still be retrievable.
if not stored:
is_default_log_id = elasticsearch_id == conf.get_default_value("elasticsearch", "log_id_template")
is_default_filename = filename == conf.get_default_value("logging", "log_filename_template")
if is_default_log_id and is_default_filename:
session.add(
LogTemplate(
filename="{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log",
elasticsearch_id="{dag_id}-{task_id}-{execution_date}-{try_number}",
)
)
# Before checking if the _current_ value exists, we need to check if the old config value we upgraded in
# place exists!
pre_upgrade_filename = conf.upgraded_values.get(("logging", "log_filename_template"), filename)
pre_upgrade_elasticsearch_id = conf.upgraded_values.get(
("elasticsearch", "log_id_template"), elasticsearch_id
)
if pre_upgrade_filename != filename or pre_upgrade_elasticsearch_id != elasticsearch_id:
        # The previous non-upgraded value likely won't be the _latest_ value (as after we've recorded
        # the upgraded value it will be second-to-newest), so we'll have to just search, which is okay
# as this is a table with a tiny number of rows
row = session.execute(
select(log_template_table.c.id)
.where(
or_(
log_template_table.c.filename == pre_upgrade_filename,
log_template_table.c.elasticsearch_id == pre_upgrade_elasticsearch_id,
)
)
.order_by(log_template_table.c.id.desc())
.limit(1)
).first()
if not row:
session.add(
LogTemplate(filename=pre_upgrade_filename, elasticsearch_id=pre_upgrade_elasticsearch_id)
)
if not stored or stored.filename != filename or stored.elasticsearch_id != elasticsearch_id:
session.add(LogTemplate(filename=filename, elasticsearch_id=elasticsearch_id)) |
Check unique conn_id in connection table.
:param session: session of the sqlalchemy | def check_conn_id_duplicates(session: Session) -> Iterable[str]:
"""
Check unique conn_id in connection table.
:param session: session of the sqlalchemy
"""
from airflow.models.connection import Connection
try:
dups = session.scalars(
select(Connection.conn_id).group_by(Connection.conn_id).having(func.count() > 1)
).all()
except (exc.OperationalError, exc.ProgrammingError):
        # fallback if tables haven't been created yet
session.rollback()
return
if dups:
yield (
"Seems you have non unique conn_id in connection table.\n"
"You have to manage those duplicate connections "
"before upgrading the database.\n"
f"Duplicated conn_id: {dups}"
) |
Check unique username in User & RegisterUser table.
:param session: session of the sqlalchemy
:rtype: str | def check_username_duplicates(session: Session) -> Iterable[str]:
"""
Check unique username in User & RegisterUser table.
:param session: session of the sqlalchemy
:rtype: str
"""
from airflow.providers.fab.auth_manager.models import RegisterUser, User
for model in [User, RegisterUser]:
dups = []
try:
dups = session.execute(
select(model.username) # type: ignore[attr-defined]
.group_by(model.username) # type: ignore[attr-defined]
.having(func.count() > 1)
).all()
except (exc.OperationalError, exc.ProgrammingError):
            # fallback if tables haven't been created yet
session.rollback()
if dups:
yield (
f"Seems you have mixed case usernames in {model.__table__.name} table.\n" # type: ignore
"You have to rename or delete those mixed case usernames "
"before upgrading the database.\n"
f"usernames with mixed cases: {[dup.username for dup in dups]}"
) |
When running checks prior to upgrades, we use reflection to determine current state of the database.
This function gets the current state of each table in the set of models
provided and returns a SQLAlchemy metadata object containing them.
"""
When running checks prior to upgrades, we use reflection to determine current state of the database.
This function gets the current state of each table in the set of models
    provided and returns a SQLAlchemy metadata object containing them.
"""
import sqlalchemy.schema
bind = session.bind
metadata = sqlalchemy.schema.MetaData()
if tables is None:
metadata.reflect(bind=bind, resolve_fks=False)
else:
for tbl in tables:
try:
table_name = tbl if isinstance(tbl, str) else tbl.__tablename__
metadata.reflect(bind=bind, only=[table_name], extend_existing=True, resolve_fks=False)
except exc.InvalidRequestError:
continue
return metadata |
Check table for duplicates, given a list of columns which define the uniqueness of the table.
Usage example:
.. code-block:: python
def check_task_fail_for_duplicates(session):
from airflow.models.taskfail import TaskFail
metadata = reflect_tables([TaskFail], session)
task_fail = metadata.tables.get(TaskFail.__tablename__) # type: ignore
if task_fail is None: # table not there
return
if "run_id" in task_fail.columns: # upgrade already applied
return
yield from check_table_for_duplicates(
table_name=task_fail.name,
uniqueness=["dag_id", "task_id", "execution_date"],
session=session,
version="2.3",
)
:param table_name: table name to check
:param uniqueness: uniqueness constraint to evaluate against
:param session: session of the sqlalchemy | def check_table_for_duplicates(
*, session: Session, table_name: str, uniqueness: list[str], version: str
) -> Iterable[str]:
"""
Check table for duplicates, given a list of columns which define the uniqueness of the table.
Usage example:
.. code-block:: python
def check_task_fail_for_duplicates(session):
from airflow.models.taskfail import TaskFail
metadata = reflect_tables([TaskFail], session)
task_fail = metadata.tables.get(TaskFail.__tablename__) # type: ignore
if task_fail is None: # table not there
return
if "run_id" in task_fail.columns: # upgrade already applied
return
yield from check_table_for_duplicates(
table_name=task_fail.name,
uniqueness=["dag_id", "task_id", "execution_date"],
session=session,
version="2.3",
)
:param table_name: table name to check
:param uniqueness: uniqueness constraint to evaluate against
:param session: session of the sqlalchemy
"""
minimal_table_obj = table(table_name, *(column(x) for x in uniqueness))
try:
subquery = session.execute(
select(minimal_table_obj, func.count().label("dupe_count"))
.group_by(*(text(x) for x in uniqueness))
.having(func.count() > text("1"))
.subquery()
)
dupe_count = session.scalar(select(func.sum(subquery.c.dupe_count)))
if not dupe_count:
# there are no duplicates; nothing to do.
return
log.warning("Found %s duplicates in table %s. Will attempt to move them.", dupe_count, table_name)
metadata = reflect_tables(tables=[table_name], session=session)
        if table_name not in metadata.tables:
            yield f"Table {table_name} does not exist in the database."
            return
# We can't use the model here since it may differ from the db state due to
# this function is run prior to migration. Use the reflected table instead.
table_obj = metadata.tables[table_name]
_move_duplicate_data_to_new_table(
session=session,
source_table=table_obj,
subquery=subquery,
uniqueness=uniqueness,
target_table_name=_format_airflow_moved_table_name(table_name, version, "duplicates"),
)
except (exc.OperationalError, exc.ProgrammingError):
# fallback if `table_name` hasn't been created yet
session.rollback() |
Check nullable conn_type column in Connection table.
:param session: session of the sqlalchemy | def check_conn_type_null(session: Session) -> Iterable[str]:
"""
Check nullable conn_type column in Connection table.
:param session: session of the sqlalchemy
"""
from airflow.models.connection import Connection
try:
n_nulls = session.scalars(select(Connection.conn_id).where(Connection.conn_type.is_(None))).all()
except (exc.OperationalError, exc.ProgrammingError, exc.InternalError):
        # fallback if tables haven't been created yet
session.rollback()
return
if n_nulls:
yield (
"The conn_type column in the connection "
"table must contain content.\n"
"Make sure you don't have null "
"in the conn_type column.\n"
f"Null conn_type conn_id: {n_nulls}"
) |
Create a new table with rows from query.
We have to handle CTAS differently for different dialects. | def _create_table_as(
*,
session,
dialect_name: str,
source_query: Query,
target_table_name: str,
source_table_name: str,
):
"""
Create a new table with rows from query.
We have to handle CTAS differently for different dialects.
"""
if dialect_name == "mysql":
        # MySQL with replication needs this split into two queries, so just do it for all MySQL
# ERROR 1786 (HY000): Statement violates GTID consistency: CREATE TABLE ... SELECT.
session.execute(text(f"CREATE TABLE {target_table_name} LIKE {source_table_name}"))
session.execute(
text(
f"INSERT INTO {target_table_name} {source_query.selectable.compile(bind=session.get_bind())}"
)
)
else:
# Postgres and SQLite both support the same "CREATE TABLE a AS SELECT ..." syntax
select_table = source_query.selectable.compile(bind=session.get_bind())
session.execute(text(f"CREATE TABLE {target_table_name} AS {select_table}")) |
Given a source table, generate a subquery returning every row that does not have a corresponding dag run. | def _dangling_against_dag_run(session, source_table, dag_run):
"""Given a source table, we generate a subquery that will return 1 for every row that has a dagrun."""
source_to_dag_run_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
return (
select(*(c.label(c.name) for c in source_table.c))
.join(dag_run, source_to_dag_run_join_cond, isouter=True)
.where(dag_run.c.dag_id.is_(None))
) |
Given a source table, generate a subquery returning every row that lacks a corresponding task instance.
This is used to identify rows that need to be removed from tables prior to adding a TI fk.
Since this check is applied prior to running the migrations, we have to use different
query logic depending on which revision the database is at. | def _dangling_against_task_instance(session, source_table, dag_run, task_instance):
"""
    Given a source table, generate a subquery returning every row that lacks a corresponding task instance.
This is used to identify rows that need to be removed from tables prior to adding a TI fk.
Since this check is applied prior to running the migrations, we have to use different
query logic depending on which revision the database is at.
"""
if "run_id" not in task_instance.c:
# db is < 2.2.0
dr_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
ti_join_cond = and_(
dag_run.c.dag_id == task_instance.c.dag_id,
dag_run.c.execution_date == task_instance.c.execution_date,
source_table.c.task_id == task_instance.c.task_id,
)
else:
# db is 2.2.0 <= version < 2.3.0
dr_join_cond = and_(
source_table.c.dag_id == dag_run.c.dag_id,
source_table.c.execution_date == dag_run.c.execution_date,
)
ti_join_cond = and_(
dag_run.c.dag_id == task_instance.c.dag_id,
dag_run.c.run_id == task_instance.c.run_id,
source_table.c.task_id == task_instance.c.task_id,
)
return (
select(*(c.label(c.name) for c in source_table.c))
.outerjoin(dag_run, dr_join_cond)
.outerjoin(task_instance, ti_join_cond)
.where(or_(task_instance.c.dag_id.is_(None), dag_run.c.dag_id.is_(None)))
) |
When adding a uniqueness constraint we first should ensure that there are no duplicate rows.
This function accepts a subquery that should return one record for each row with duplicates (e.g.
a group by with having count(*) > 1). We select from ``source_table`` getting all rows matching the
subquery result and store in ``target_table_name``. Then to purge the duplicates from the source table,
we do a DELETE FROM with a join to the target table (which now contains the dupes).
:param session: sqlalchemy session for metadata db
:param source_table: table to purge dupes from
:param subquery: the subquery that returns the duplicate rows
:param uniqueness: the string list of columns used to define the uniqueness for the table. used in
building the DELETE FROM join condition.
:param target_table_name: name of the table in which to park the duplicate rows | def _move_duplicate_data_to_new_table(
session, source_table: Table, subquery: Query, uniqueness: list[str], target_table_name: str
):
"""
When adding a uniqueness constraint we first should ensure that there are no duplicate rows.
This function accepts a subquery that should return one record for each row with duplicates (e.g.
a group by with having count(*) > 1). We select from ``source_table`` getting all rows matching the
subquery result and store in ``target_table_name``. Then to purge the duplicates from the source table,
we do a DELETE FROM with a join to the target table (which now contains the dupes).
:param session: sqlalchemy session for metadata db
:param source_table: table to purge dupes from
:param subquery: the subquery that returns the duplicate rows
:param uniqueness: the string list of columns used to define the uniqueness for the table. used in
building the DELETE FROM join condition.
:param target_table_name: name of the table in which to park the duplicate rows
"""
bind = session.get_bind()
dialect_name = bind.dialect.name
query = (
select(*(source_table.c[x.name].label(str(x.name)) for x in source_table.columns))
.select_from(source_table)
.join(subquery, and_(*(source_table.c[x] == subquery.c[x] for x in uniqueness)))
)
_create_table_as(
session=session,
dialect_name=dialect_name,
source_query=query,
target_table_name=target_table_name,
source_table_name=source_table.name,
)
# we must ensure that the CTAS table is created prior to the DELETE step since we have to join to it
session.commit()
metadata = reflect_tables([target_table_name], session)
target_table = metadata.tables[target_table_name]
where_clause = and_(*(source_table.c[x] == target_table.c[x] for x in uniqueness))
if dialect_name == "sqlite":
subq = query.selectable.with_only_columns([text(f"{source_table}.ROWID")])
delete = source_table.delete().where(column("ROWID").in_(subq))
else:
delete = source_table.delete(where_clause)
session.execute(delete) |
Go through each table and look for records that can't be mapped to a dag run.
When we find such "dangling" rows we back them up in a special table and delete them
from the main table.
Starting in Airflow 2.2, we began a process of replacing `execution_date` with `run_id` in many tables. | def check_bad_references(session: Session) -> Iterable[str]:
"""
Go through each table and look for records that can't be mapped to a dag run.
When we find such "dangling" rows we back them up in a special table and delete them
from the main table.
Starting in Airflow 2.2, we began a process of replacing `execution_date` with `run_id` in many tables.
"""
from airflow.models.dagrun import DagRun
from airflow.models.renderedtifields import RenderedTaskInstanceFields
from airflow.models.taskfail import TaskFail
from airflow.models.taskinstance import TaskInstance
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.xcom import XCom
@dataclass
class BadReferenceConfig:
"""
Bad reference config class.
:param bad_rows_func: function that returns subquery which determines whether bad rows exist
:param join_tables: table objects referenced in subquery
:param ref_table: information-only identifier for categorizing the missing ref
"""
bad_rows_func: Callable
join_tables: list[str]
ref_table: str
missing_dag_run_config = BadReferenceConfig(
bad_rows_func=_dangling_against_dag_run,
join_tables=["dag_run"],
ref_table="dag_run",
)
missing_ti_config = BadReferenceConfig(
bad_rows_func=_dangling_against_task_instance,
join_tables=["dag_run", "task_instance"],
ref_table="task_instance",
)
models_list: list[tuple[Base, str, BadReferenceConfig]] = [
(TaskInstance, "2.2", missing_dag_run_config),
(TaskReschedule, "2.2", missing_ti_config),
(RenderedTaskInstanceFields, "2.3", missing_ti_config),
(TaskFail, "2.3", missing_ti_config),
(XCom, "2.3", missing_ti_config),
]
metadata = reflect_tables([*(x[0] for x in models_list), DagRun, TaskInstance], session)
if (
not metadata.tables
or metadata.tables.get(DagRun.__tablename__) is None
or metadata.tables.get(TaskInstance.__tablename__) is None
):
# Key table doesn't exist -- likely empty DB.
return
existing_table_names = set(inspect(session.get_bind()).get_table_names())
errored = False
for model, change_version, bad_ref_cfg in models_list:
log.debug("checking model %s", model.__tablename__)
# We can't use the model here since it may differ from the db state due to
# this function is run prior to migration. Use the reflected table instead.
source_table = metadata.tables.get(model.__tablename__) # type: ignore
if source_table is None:
continue
# Migration already applied, don't check again.
if "run_id" in source_table.columns:
continue
func_kwargs = {x: metadata.tables[x] for x in bad_ref_cfg.join_tables}
bad_rows_query = bad_ref_cfg.bad_rows_func(session, source_table, **func_kwargs)
dangling_table_name = _format_airflow_moved_table_name(source_table.name, change_version, "dangling")
if dangling_table_name in existing_table_names:
invalid_row_count = get_query_count(bad_rows_query, session=session)
if invalid_row_count:
yield _format_dangling_error(
source_table=source_table.name,
target_table=dangling_table_name,
invalid_count=invalid_row_count,
reason=f"without a corresponding {bad_ref_cfg.ref_table} row",
)
errored = True
continue
log.debug("moving data for table %s", source_table.name)
_move_dangling_data_to_new_table(
session,
source_table,
bad_rows_query,
dangling_table_name,
)
if errored:
session.rollback()
else:
session.commit() |
:param session: session of the sqlalchemy. | def _check_migration_errors(session: Session = NEW_SESSION) -> Iterable[str]:
    """:param session: session of the sqlalchemy."""
check_functions: tuple[Callable[..., Iterable[str]], ...] = (
check_conn_id_duplicates,
check_conn_type_null,
check_run_id_null,
check_bad_references,
check_username_duplicates,
)
for check_fn in check_functions:
log.debug("running check function %s", check_fn.__name__)
yield from check_fn(session=session) |
Check that all supplied revision ids are above the minimum revision for the dialect.
:param config: Alembic config
:param revisions: list of Alembic revision ids
:return: None | def _revisions_above_min_for_offline(config, revisions) -> None:
"""
Check that all supplied revision ids are above the minimum revision for the dialect.
:param config: Alembic config
:param revisions: list of Alembic revision ids
:return: None
"""
dbname = settings.engine.dialect.name
if dbname == "sqlite":
raise AirflowException("Offline migration not supported for SQLite.")
min_version, min_revision = ("2.2.0", "7b2661a43ba3") if dbname == "mssql" else ("2.0.0", "e959f08ac86c")
# Check if there is history between the revisions and the start revision
# This ensures that the revisions are above `min_revision`
for rev in revisions:
if not _revision_greater(config, rev, min_revision):
raise ValueError(
f"Error while checking history for revision range {min_revision}:{rev}. "
f"Check that {rev} is a valid revision. "
f"For dialect {dbname!r}, supported revision for offline migration is from {min_revision} "
f"which corresponds to Airflow {min_version}."
) |
Upgrades the DB.
:param to_revision: Optional Alembic revision ID to upgrade *to*.
If omitted, upgrades to latest revision.
:param from_revision: Optional Alembic revision ID to upgrade *from*.
    Not compatible with ``show_sql_only=False``.
:param show_sql_only: if True, migration statements will be printed but not executed.
:param session: sqlalchemy session with connection to Airflow metadata database
:return: None | def upgradedb(
*,
to_revision: str | None = None,
from_revision: str | None = None,
show_sql_only: bool = False,
reserialize_dags: bool = True,
session: Session = NEW_SESSION,
):
"""
Upgrades the DB.
:param to_revision: Optional Alembic revision ID to upgrade *to*.
If omitted, upgrades to latest revision.
:param from_revision: Optional Alembic revision ID to upgrade *from*.
        Not compatible with ``show_sql_only=False``.
:param show_sql_only: if True, migration statements will be printed but not executed.
:param session: sqlalchemy session with connection to Airflow metadata database
:return: None
"""
if from_revision and not show_sql_only:
raise AirflowException("`from_revision` only supported with `sql_only=True`.")
# alembic adds significant import time, so we import it lazily
if not settings.SQL_ALCHEMY_CONN:
raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set. This is a critical assertion.")
from alembic import command
import_all_models()
config = _get_alembic_config()
if show_sql_only:
if not from_revision:
from_revision = _get_current_revision(session)
if not to_revision:
script = _get_script_object()
to_revision = script.get_current_head()
if to_revision == from_revision:
print_happy_cat("No migrations to apply; nothing to do.")
return
if not _revision_greater(config, to_revision, from_revision):
raise ValueError(
f"Requested *to* revision {to_revision} is older than *from* revision {from_revision}. "
"Please check your requested versions / revisions."
)
_revisions_above_min_for_offline(config=config, revisions=[from_revision, to_revision])
_offline_migration(command.upgrade, config, f"{from_revision}:{to_revision}")
return # only running sql; our job is done
errors_seen = False
for err in _check_migration_errors(session=session):
if not errors_seen:
log.error("Automatic migration is not available")
errors_seen = True
log.error("%s", err)
if errors_seen:
exit(1)
if not to_revision and not _get_current_revision(session=session):
# Don't load default connections
# New DB; initialize and exit
initdb(session=session, load_connections=False)
return
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
import sqlalchemy.pool
log.info("Creating tables")
val = os.environ.get("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
try:
# Reconfigure the ORM to use _EXACTLY_ one connection, otherwise some db engines hang forever
# trying to ALTER TABLEs
os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = "1"
settings.reconfigure_orm(pool_class=sqlalchemy.pool.SingletonThreadPool)
command.upgrade(config, revision=to_revision or "heads")
finally:
if val is None:
os.environ.pop("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
else:
os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = val
settings.reconfigure_orm()
if reserialize_dags:
_reserialize_dags(session=session)
add_default_pool_if_not_exists(session=session)
synchronize_log_template(session=session) |
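# Usage sketch (illustrative, not part of the module above): previewing pending metadata
# migrations as SQL before applying them. Assumes an initialized Airflow environment so
# settings.SQL_ALCHEMY_CONN points at the metadata database.
def _example_upgrade_metadata_db(preview_only: bool = True) -> None:
    from airflow.utils.db import upgradedb

    if preview_only:
        # Prints the migration SQL instead of executing it; from_revision defaults to the
        # current revision recorded in the database. (Offline SQL generation is not
        # supported on SQLite, per _revisions_above_min_for_offline above.)
        upgradedb(show_sql_only=True)
    else:
        # Applies all pending migrations up to the latest revision.
        upgradedb()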
Clear out the database. | def resetdb(session: Session = NEW_SESSION, skip_init: bool = False):
"""Clear out the database."""
if not settings.engine:
raise RuntimeError("The settings.engine must be set. This is a critical assertion")
log.info("Dropping tables that exist")
import_all_models()
connection = settings.engine.connect()
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS), connection.begin():
drop_airflow_models(connection)
drop_airflow_moved_tables(connection)
if not skip_init:
initdb(session=session) |
Downgrade the airflow metastore schema to a prior version.
:param to_revision: The alembic revision to downgrade *to*.
:param show_sql_only: if True, print sql statements but do not run them
:param from_revision: if supplied, alembic revision to downgrade *from*. This may only
    be used in conjunction with ``show_sql_only=True`` because if we actually run the commands,
we should only downgrade from the *current* revision.
:param session: sqlalchemy session for connection to airflow metadata database | def downgrade(*, to_revision, from_revision=None, show_sql_only=False, session: Session = NEW_SESSION):
"""
Downgrade the airflow metastore schema to a prior version.
:param to_revision: The alembic revision to downgrade *to*.
:param show_sql_only: if True, print sql statements but do not run them
    :param from_revision: if supplied, alembic revision to downgrade *from*. This may only
        be used in conjunction with ``show_sql_only=True`` because if we actually run the commands,
we should only downgrade from the *current* revision.
:param session: sqlalchemy session for connection to airflow metadata database
"""
if from_revision and not show_sql_only:
raise ValueError(
"`from_revision` can't be combined with `sql=False`. When actually "
"applying a downgrade (instead of just generating sql), we always "
"downgrade from current revision."
)
if not settings.SQL_ALCHEMY_CONN:
raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set.")
# alembic adds significant import time, so we import it lazily
from alembic import command
log.info("Attempting downgrade to revision %s", to_revision)
config = _get_alembic_config()
with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
if show_sql_only:
log.warning("Generating sql scripts for manual migration.")
if not from_revision:
from_revision = _get_current_revision(session)
revision_range = f"{from_revision}:{to_revision}"
_offline_migration(command.downgrade, config=config, revision=revision_range)
else:
log.info("Applying downgrade migrations.")
command.downgrade(config, revision=to_revision, sql=show_sql_only) |
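# Usage sketch (illustrative): generating the SQL for a downgrade so it can be reviewed
# before running the real thing. The target revision is a placeholder -- substitute a
# real Alembic revision id from your environment.
def _example_preview_downgrade(target_revision: str) -> None:
    from airflow.utils.db import downgrade

    # Only prints the statements; from_revision defaults to the current database revision.
    downgrade(to_revision=target_revision, show_sql_only=True)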
Drop all airflow models.
:param connection: SQLAlchemy Connection
:return: None | def drop_airflow_models(connection):
"""
Drop all airflow models.
:param connection: SQLAlchemy Connection
:return: None
"""
from airflow.models.base import Base
from airflow.providers.fab.auth_manager.models import Model
Base.metadata.drop_all(connection)
Model.metadata.drop_all(connection)
db = _get_flask_db(connection.engine.url)
db.drop_all()
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
migration_ctx = MigrationContext.configure(connection)
version = migration_ctx._version
if inspect(connection).has_table(version.name):
version.drop(connection) |
Check if the database works.
:param session: session of the sqlalchemy | def check(session: Session = NEW_SESSION):
"""
Check if the database works.
:param session: session of the sqlalchemy
"""
session.execute(text("select 1 as is_alive;"))
log.info("Connection successful.") |
Contextmanager that will create and teardown a global db lock. | def create_global_lock(
session: Session,
lock: DBLocks,
lock_timeout: int = 1800,
) -> Generator[None, None, None]:
"""Contextmanager that will create and teardown a global db lock."""
conn = session.get_bind().connect()
dialect = conn.dialect
try:
if dialect.name == "postgresql":
conn.execute(text("SET LOCK_TIMEOUT to :timeout"), {"timeout": lock_timeout})
conn.execute(text("SELECT pg_advisory_lock(:id)"), {"id": lock.value})
elif dialect.name == "mysql" and dialect.server_version_info >= (5, 6):
conn.execute(text("SELECT GET_LOCK(:id, :timeout)"), {"id": str(lock), "timeout": lock_timeout})
yield
finally:
if dialect.name == "postgresql":
conn.execute(text("SET LOCK_TIMEOUT TO DEFAULT"))
(unlocked,) = conn.execute(text("SELECT pg_advisory_unlock(:id)"), {"id": lock.value}).fetchone()
if not unlocked:
raise RuntimeError("Error releasing DB lock!")
elif dialect.name == "mysql" and dialect.server_version_info >= (5, 6):
conn.execute(text("select RELEASE_LOCK(:id)"), {"id": str(lock)}) |
Compare types between ORM and DB.
Return False if the metadata_type is the same as the inspected_type,
or None to allow the default implementation to compare these
types. A return value of True means the two types do not
match and should result in a type change operation. | def compare_type(context, inspected_column, metadata_column, inspected_type, metadata_type):
"""
    Compare types between ORM and DB.
    Return False if the metadata_type is the same as the inspected_type,
    or None to allow the default implementation to compare these
    types. A return value of True means the two types do not
    match and should result in a type change operation.
"""
if context.dialect.name == "mysql":
from sqlalchemy import String
from sqlalchemy.dialects import mysql
if isinstance(inspected_type, mysql.VARCHAR) and isinstance(metadata_type, String):
# This is a hack to get around MySQL VARCHAR collation
# not being possible to change from utf8_bin to utf8mb3_bin.
# We only make sure lengths are the same
if inspected_type.length != metadata_type.length:
return True
return False
return None |
Compare server defaults between ORM and DB.
Return True if the defaults are different, False if not, or None to allow the default implementation
to compare these defaults.
In SQLite: task_instance.map_index & task_reschedule.map_index
do not compare accurately. Sometimes they are equal, sometimes they are not.
Alembic warned that this feature has varied accuracy depending on backends.
See: (https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.
environment.EnvironmentContext.configure.params.compare_server_default) | def compare_server_default(
context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default
):
"""
    Compare server defaults between ORM and DB.
    Return True if the defaults are different, False if not, or None to allow the default implementation
    to compare these defaults.
    In SQLite: task_instance.map_index & task_reschedule.map_index
    do not compare accurately. Sometimes they are equal, sometimes they are not.
Alembic warned that this feature has varied accuracy depending on backends.
See: (https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.
environment.EnvironmentContext.configure.params.compare_server_default)
"""
dialect_name = context.connection.dialect.name
if dialect_name in ["sqlite"]:
return False
if (
dialect_name == "mysql"
and metadata_column.name == "pool_slots"
and metadata_column.table.name == "task_instance"
):
# We removed server_default value in ORM to avoid expensive migration
# (it was removed in postgres DB in migration head 7b2661a43ba3 ).
# As a side note, server default value here was only actually needed for the migration
# where we added the column in the first place -- now that it exists and all
# existing rows are populated with a value this server default is never used.
return False
return None |
Get all SQLAlchemy class mappers.
SQLAlchemy < 1.4 does not support registry.mappers so we use
try/except to handle it. | def get_sqla_model_classes():
"""
Get all SQLAlchemy class mappers.
SQLAlchemy < 1.4 does not support registry.mappers so we use
try/except to handle it.
"""
from airflow.models.base import Base
try:
return [mapper.class_ for mapper in Base.registry.mappers]
except AttributeError:
return Base._decl_class_registry.values() |
Get count of query.
A SELECT COUNT() FROM is issued against the subquery built from the
given statement. The ORDER BY clause is stripped from the statement
since it's unnecessary for COUNT, and can impact query planning and
degrade performance.
:meta private: | def get_query_count(query_stmt: Select, *, session: Session) -> int:
"""Get count of query.
A SELECT COUNT() FROM is issued against the subquery built from the
given statement. The ORDER BY clause is stripped from the statement
since it's unnecessary for COUNT, and can impact query planning and
degrade performance.
:meta private:
"""
count_stmt = select(func.count()).select_from(query_stmt.order_by(None).subquery())
return session.scalar(count_stmt) |
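# Usage sketch (illustrative): counting rows matching an arbitrary SELECT. The ORDER BY
# below is stripped automatically before the statement is wrapped in SELECT COUNT().
def _example_count_queued_tis(session) -> int:
    from sqlalchemy import select

    from airflow.models.taskinstance import TaskInstance
    from airflow.utils.db import get_query_count

    stmt = (
        select(TaskInstance)
        .where(TaskInstance.state == "queued")
        .order_by(TaskInstance.queued_dttm)
    )
    return get_query_count(stmt, session=session)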
Check whether there is at least one row matching given clause.
This does a SELECT 1 WHERE ... LIMIT 1 and checks the result.
:meta private: | def exists_query(*where: ClauseElement, session: Session) -> bool:
"""Check whether there is at least one row matching given clause.
    This does a SELECT 1 WHERE ... LIMIT 1 and checks the result.
:meta private:
"""
stmt = select(literal(True)).where(*where).limit(1)
return session.scalar(stmt) is not None |
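# Usage sketch (illustrative): cheap existence check -- at most one row is fetched.
def _example_dag_has_runs(session, dag_id: str) -> bool:
    from airflow.models.dagrun import DagRun
    from airflow.utils.db import exists_query

    return exists_query(DagRun.dag_id == dag_id, session=session)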
Suppresses errors but logs them.
Rolls back the session if it is still active after the error. | def _suppress_with_logging(table, session):
"""
Suppresses errors but logs them.
    Rolls back the session if it is still active after the error.
"""
try:
yield
except (OperationalError, ProgrammingError):
logger.warning("Encountered error when attempting to clean table '%s'. ", table)
logger.debug("Traceback for table '%s'", table, exc_info=True)
if session.is_active:
logger.debug("Rolling back transaction")
session.rollback() |
Purges old records in airflow metadata database.
The last non-externally-triggered dag run will always be kept in order to ensure
continuity of scheduled dag runs.
Where there are foreign key relationships, deletes will cascade, so that for
example if you clean up old dag runs, the associated task instances will
be deleted.
:param clean_before_timestamp: The timestamp before which data should be purged
:param table_names: Optional. List of table names to perform maintenance on. If list not provided,
will perform maintenance on all tables.
:param dry_run: If true, print rows meeting deletion criteria
:param verbose: If true, may provide more detailed output.
:param confirm: Require user input to confirm before processing deletions.
:param skip_archive: Set to True if you don't want the purged rows preserved in an archive table.
:param session: Session representing connection to the metadata database. | def run_cleanup(
*,
clean_before_timestamp: DateTime,
table_names: list[str] | None = None,
dry_run: bool = False,
verbose: bool = False,
confirm: bool = True,
skip_archive: bool = False,
session: Session = NEW_SESSION,
):
"""
Purges old records in airflow metadata database.
The last non-externally-triggered dag run will always be kept in order to ensure
continuity of scheduled dag runs.
Where there are foreign key relationships, deletes will cascade, so that for
example if you clean up old dag runs, the associated task instances will
be deleted.
:param clean_before_timestamp: The timestamp before which data should be purged
:param table_names: Optional. List of table names to perform maintenance on. If list not provided,
will perform maintenance on all tables.
:param dry_run: If true, print rows meeting deletion criteria
:param verbose: If true, may provide more detailed output.
:param confirm: Require user input to confirm before processing deletions.
    :param skip_archive: Set to True if you don't want the purged rows preserved in an archive table.
:param session: Session representing connection to the metadata database.
"""
clean_before_timestamp = timezone.coerce_datetime(clean_before_timestamp)
effective_table_names, effective_config_dict = _effective_table_names(table_names=table_names)
if dry_run:
print("Performing dry run for db cleanup.")
print(
f"Data prior to {clean_before_timestamp} would be purged "
f"from tables {effective_table_names} with the following config:\n"
)
_print_config(configs=effective_config_dict)
if not dry_run and confirm:
_confirm_delete(date=clean_before_timestamp, tables=sorted(effective_table_names))
existing_tables = reflect_tables(tables=None, session=session).tables
for table_name, table_config in effective_config_dict.items():
if table_name in existing_tables:
with _suppress_with_logging(table_name, session):
_cleanup_table(
clean_before_timestamp=clean_before_timestamp,
dry_run=dry_run,
verbose=verbose,
**table_config.__dict__,
skip_archive=skip_archive,
session=session,
)
session.commit()
else:
logger.warning("Table %s not found. Skipping.", table_name) |
Export archived data to the given output path in the given format. | def export_archived_records(
export_format,
output_path,
table_names=None,
drop_archives=False,
needs_confirm=True,
session: Session = NEW_SESSION,
):
"""Export archived data to the given output path in the given format."""
archived_table_names = _get_archived_table_names(table_names, session)
# If user chose to drop archives, check there are archive tables that exists
# before asking for confirmation
if drop_archives and archived_table_names and needs_confirm:
_confirm_drop_archives(tables=sorted(archived_table_names))
export_count = 0
dropped_count = 0
for table_name in archived_table_names:
logger.info("Exporting table %s", table_name)
_dump_table_to_file(
target_table=table_name,
file_path=os.path.join(output_path, f"{table_name}.{export_format}"),
export_format=export_format,
session=session,
)
export_count += 1
if drop_archives:
logger.info("Dropping archived table %s", table_name)
session.execute(text(f"DROP TABLE {table_name}"))
dropped_count += 1
logger.info("Total exported tables: %s, Total dropped tables: %s", export_count, dropped_count) |
Drop archived tables. | def drop_archived_tables(table_names, needs_confirm, session):
"""Drop archived tables."""
archived_table_names = _get_archived_table_names(table_names, session)
if needs_confirm and archived_table_names:
_confirm_drop_archives(tables=sorted(archived_table_names))
dropped_count = 0
for table_name in archived_table_names:
logger.info("Dropping archived table %s", table_name)
session.execute(text(f"DROP TABLE {table_name}"))
dropped_count += 1
logger.info("Total dropped tables: %s", dropped_count) |
Use apply_default decorator for the `default_args` feature to work properly; deprecated.
In previous versions, all subclasses of BaseOperator must use apply_default decorator for the
`default_args` feature to work properly.
In current version, it is optional. The decorator is applied automatically using the metaclass. | def apply_defaults(func: T) -> T:
"""
Use apply_default decorator for the `default_args` feature to work properly; deprecated.
    In previous versions, all subclasses of BaseOperator must use apply_default decorator for the
`default_args` feature to work properly.
In current version, it is optional. The decorator is applied automatically using the metaclass.
"""
warnings.warn(
"This decorator is deprecated. \n"
"\n"
"In previous versions, all subclasses of BaseOperator must use apply_default decorator for the "
"`default_args` feature to work properly.\n"
"\n"
"In current version, it is optional. The decorator is applied automatically using the metaclass.\n",
RemovedInAirflow3Warning,
stacklevel=3,
)
# Make it still be a wrapper to keep the previous behaviour of an extra stack frame
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return cast(T, wrapper) |
Remove @task or similar decorators as well as @setup and @teardown.
:param python_source: The python source code
:param task_decorator_name: the decorator name
TODO: Python 3.9+: Rewrite this to use ast.parse and ast.unparse | def remove_task_decorator(python_source: str, task_decorator_name: str) -> str:
"""
Remove @task or similar decorators as well as @setup and @teardown.
:param python_source: The python source code
:param task_decorator_name: the decorator name
TODO: Python 3.9+: Rewrite this to use ast.parse and ast.unparse
"""
def _remove_task_decorator(py_source, decorator_name):
# if no line starts with @decorator_name, we can early exit
for line in py_source.split("\n"):
if line.startswith(decorator_name):
break
else:
            return py_source
        split = py_source.split(decorator_name, 1)
before_decorator, after_decorator = split[0], split[1]
if after_decorator[0] == "(":
after_decorator = _balance_parens(after_decorator)
if after_decorator[0] == "\n":
after_decorator = after_decorator[1:]
return before_decorator + after_decorator
decorators = ["@setup", "@teardown", task_decorator_name]
for decorator in decorators:
python_source = _remove_task_decorator(python_source, decorator)
return python_source |
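# Illustrative behaviour of remove_task_decorator: the named decorator (with its
# parenthesised arguments) and any @setup/@teardown markers are stripped, while the
# function body itself is untouched. The decorator arguments below are just example text.
_EXAMPLE_DECORATED = (
    "@setup\n"
    "@task.virtualenv(use_dill=True)\n"
    "def callable_virtualenv():\n"
    "    return 1\n"
)
# remove_task_decorator(_EXAMPLE_DECORATED, "@task.virtualenv") returns:
#     "def callable_virtualenv():\n    return 1\n"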
Retrieve the imported attribute from the redirected module and raise a deprecation warning.
:param imports: dict of imports and their redirection for the module
:param module: name of the module in the package to get the attribute from
:param override_deprecated_classes: override target classes with deprecated ones. If target class is
found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
:param name: attribute name
:return: | def getattr_with_deprecation(
imports: dict[str, str],
module: str,
override_deprecated_classes: dict[str, str],
extra_message: str,
name: str,
):
"""
    Retrieve the imported attribute from the redirected module and raise a deprecation warning.
:param imports: dict of imports and their redirection for the module
:param module: name of the module in the package to get the attribute from
:param override_deprecated_classes: override target classes with deprecated ones. If target class is
found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
:param name: attribute name
:return:
"""
target_class_full_name = imports.get(name)
if not target_class_full_name:
raise AttributeError(f"The module `{module!r}` has no attribute `{name!r}`")
warning_class_name = target_class_full_name
if override_deprecated_classes and name in override_deprecated_classes:
warning_class_name = override_deprecated_classes[name]
message = f"The `{module}.{name}` class is deprecated. Please use `{warning_class_name!r}`."
if extra_message:
message += f" {extra_message}."
warnings.warn(message, DeprecationWarning, stacklevel=2)
new_module, new_class_name = target_class_full_name.rsplit(".", 1)
try:
return getattr(importlib.import_module(new_module), new_class_name)
except ImportError as e:
error_message = (
f"Could not import `{new_module}.{new_class_name}` while trying to import `{module}.{name}`."
)
if extra_message:
error_message += f" {extra_message}."
raise ImportError(error_message) from e |
Add deprecated class import redirects (PEP 562 module __getattr__) and deprecation warnings to the package.
:param module_imports: imports to use
:param package: package name
:param override_deprecated_classes: override target classes with deprecated ones. If module +
target class is found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message | def add_deprecated_classes(
module_imports: dict[str, dict[str, str]],
package: str,
override_deprecated_classes: dict[str, dict[str, str]] | None = None,
extra_message: str | None = None,
):
"""
    Add deprecated class import redirects (PEP 562 module __getattr__) and deprecation warnings to the package.
:param module_imports: imports to use
:param package: package name
:param override_deprecated_classes: override target classes with deprecated ones. If module +
target class is found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
"""
for module_name, imports in module_imports.items():
full_module_name = f"{package}.{module_name}"
module_type = ModuleType(full_module_name)
if override_deprecated_classes and module_name in override_deprecated_classes:
override_deprecated_classes_for_module = override_deprecated_classes[module_name]
else:
override_deprecated_classes_for_module = {}
# Mypy is not able to derive the right function signature https://github.com/python/mypy/issues/2427
module_type.__getattr__ = functools.partial( # type: ignore[assignment]
getattr_with_deprecation,
imports,
full_module_name,
override_deprecated_classes_for_module,
extra_message or "",
)
sys.modules.setdefault(full_module_name, module_type) |
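# Usage sketch (hypothetical names): a shim a package can register so that
# `from my_provider.hooks.old_hook import OldHook` keeps working, while emitting a
# DeprecationWarning that points at the new location.
def _example_register_deprecated_hooks() -> None:
    from airflow.utils.deprecation_tools import add_deprecated_classes

    add_deprecated_classes(
        {
            # old module name (relative to the package) -> {old class: new fully-qualified class}
            "old_hook": {"OldHook": "my_provider.hooks.new_hook.NewHook"},
        },
        package="my_provider.hooks",  # typically just __name__ of the shim package
    )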
Prepare link to Airflow documentation. | def get_docs_url(page: str | None = None) -> str:
"""Prepare link to Airflow documentation."""
from airflow.version import version
if any(suffix in version for suffix in ["dev", "a", "b"]):
result = (
"http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com/docs/apache-airflow/stable/"
)
else:
result = f"https://airflow.apache.org/docs/apache-airflow/{version}/"
if page:
result = result + page
return result |
Prepare link to Airflow Provider documentation. | def get_doc_url_for_provider(provider_name: str, provider_version: str) -> str:
"""Prepare link to Airflow Provider documentation."""
try:
metadata_items = metadata.metadata(provider_name).get_all("Project-URL")
if isinstance(metadata_items, str):
metadata_items = [metadata_items]
if metadata_items:
for item in metadata_items:
if item.lower().startswith("documentation"):
_, _, url = item.partition(",")
if url:
return url.strip()
except metadata.PackageNotFoundError:
pass
    # Fallback if the provider is an Apache one
if provider_name.startswith("apache-airflow"):
return f"https://airflow.apache.org/docs/{provider_name}/{provider_version}/"
return "https://airflow.apache.org/docs/apache-airflow-providers/index.html#creating-your-own-providers" |
Convert color in #RGB (12 bits) format to #RRGGBB (24 bits), if possible.
Otherwise, it returns the original value. Graphviz does not support colors in #RGB format.
:param color: Text representation of color
:return: Refined representation of color | def _refine_color(color: str):
"""
    Convert color in #RGB (12 bits) format to #RRGGBB (24 bits), if possible.
Otherwise, it returns the original value. Graphviz does not support colors in #RGB format.
:param color: Text representation of color
:return: Refined representation of color
"""
if len(color) == 4 and color[0] == "#":
color_r = color[1]
color_g = color[2]
color_b = color[3]
return "#" + color_r + color_r + color_g + color_g + color_b + color_b
return color |
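# Illustrative behaviour of _refine_color: 12-bit colors are widened to 24-bit,
# everything else is passed through unchanged for graphviz to handle.
assert _refine_color("#0f0") == "#00ff00"
assert _refine_color("#00ff00") == "#00ff00"
assert _refine_color("tomato") == "tomato"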
Draw a single task on the given parent_graph. | def _draw_task(
task: MappedOperator | BaseOperator,
parent_graph: graphviz.Digraph,
states_by_task_id: dict[Any, Any] | None,
) -> None:
"""Draw a single task on the given parent_graph."""
if states_by_task_id:
state = states_by_task_id.get(task.task_id)
color = State.color_fg(state)
fill_color = State.color(state)
else:
color = task.ui_fgcolor
fill_color = task.ui_color
parent_graph.node(
task.task_id,
_attributes={
"label": task.label,
"shape": "rectangle",
"style": "filled,rounded",
"color": _refine_color(color),
"fillcolor": _refine_color(fill_color),
},
) |
Draw the given task_group and its children on the given parent_graph. | def _draw_task_group(
task_group: TaskGroup, parent_graph: graphviz.Digraph, states_by_task_id: dict[str, str] | None
) -> None:
"""Draw the given task_group and its children on the given parent_graph."""
# Draw joins
if task_group.upstream_group_ids or task_group.upstream_task_ids:
parent_graph.node(
task_group.upstream_join_id,
_attributes={
"label": "",
"shape": "circle",
"style": "filled,rounded",
"color": _refine_color(task_group.ui_fgcolor),
"fillcolor": _refine_color(task_group.ui_color),
"width": "0.2",
"height": "0.2",
},
)
if task_group.downstream_group_ids or task_group.downstream_task_ids:
parent_graph.node(
task_group.downstream_join_id,
_attributes={
"label": "",
"shape": "circle",
"style": "filled,rounded",
"color": _refine_color(task_group.ui_fgcolor),
"fillcolor": _refine_color(task_group.ui_color),
"width": "0.2",
"height": "0.2",
},
)
# Draw children
for child in sorted(task_group.children.values(), key=lambda t: t.node_id if t.node_id else ""):
_draw_nodes(child, parent_graph, states_by_task_id) |
Draw the node and its children on the given parent_graph recursively. | def _draw_nodes(
node: DependencyMixin, parent_graph: graphviz.Digraph, states_by_task_id: dict[str, str] | None
) -> None:
"""Draw the node and its children on the given parent_graph recursively."""
if isinstance(node, (BaseOperator, MappedOperator)):
_draw_task(node, parent_graph, states_by_task_id)
else:
if not isinstance(node, TaskGroup):
raise AirflowException(f"The node {node} should be TaskGroup and is not")
# Draw TaskGroup
if node.is_root:
# No need to draw background for root TaskGroup.
_draw_task_group(node, parent_graph, states_by_task_id)
else:
with parent_graph.subgraph(name=f"cluster_{node.group_id}") as sub:
sub.attr(
shape="rectangle",
style="filled",
color=_refine_color(node.ui_fgcolor),
# Partially transparent CornflowerBlue
fillcolor="#6495ed7f",
label=node.label,
)
_draw_task_group(node, sub, states_by_task_id) |
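These helpers are normally driven by render_dag below, but they can be called directly; a minimal sketch, assuming `dag` is an already-constructed DAG object:

import graphviz

dot = graphviz.Digraph(graph_attr={"rankdir": "LR"})
_draw_nodes(dag.task_group, dot, states_by_task_id=None)  # dag.task_group is the root TaskGroup
print(dot.source)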
Render the DAG dependencies to a Graphviz DOT object.
:param deps: Dictionary of DAG dependencies, keyed by DAG ID
:return: Graphviz object | def render_dag_dependencies(deps: dict[str, list[DagDependency]]) -> graphviz.Digraph:
"""
Render the DAG dependencies to a Graphviz DOT object.
:param deps: Dictionary of DAG dependencies, keyed by DAG ID
:return: Graphviz object
"""
if not graphviz:
raise AirflowException(
"Could not import graphviz. Install the graphviz python package to fix this error."
)
dot = graphviz.Digraph(graph_attr={"rankdir": "LR"})
for dag, dependencies in deps.items():
for dep in dependencies:
with dot.subgraph(
name=dag,
graph_attr={
"rankdir": "LR",
"labelloc": "t",
"label": dag,
},
) as dep_subgraph:
dep_subgraph.edge(dep.source, dep.dependency_id)
dep_subgraph.edge(dep.dependency_id, dep.target)
return dot |
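A hedged usage sketch; DagDependency is assumed to be the dataclass from airflow.serialization.serialized_objects, and the field values below are made up for illustration:

from airflow.serialization.serialized_objects import DagDependency

deps = {
    "upstream_dag": [
        DagDependency(
            source="upstream_dag",
            target="downstream_dag",
            dependency_type="trigger",
            dependency_id="trigger_downstream",
        )
    ]
}
dot = render_dag_dependencies(deps)
print(dot.source)                                            # raw DOT text
dot.render("dag_dependencies", format="png", cleanup=True)   # needs the graphviz binaries installed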
Render the DAG object to a Graphviz DOT object.
If a task instance list is passed, the nodes will be painted according to task statuses.
:param dag: DAG that will be rendered.
:param tis: List of task instances
:return: Graphviz object | def render_dag(dag: DAG, tis: list[TaskInstance] | None = None) -> graphviz.Digraph:
"""
Render the DAG object to a Graphviz DOT object.
If a task instance list is passed, the nodes will be painted according to task statuses.
:param dag: DAG that will be rendered.
:param tis: List of task instances
:return: Graphviz object
"""
if not graphviz:
raise AirflowException(
"Could not import graphviz. Install the graphviz python package to fix this error."
)
dot = graphviz.Digraph(
dag.dag_id,
graph_attr={
"rankdir": dag.orientation if dag.orientation else "LR",
"labelloc": "t",
"label": dag.dag_id,
},
)
states_by_task_id = None
if tis is not None:
states_by_task_id = {ti.task_id: ti.state for ti in tis}
_draw_nodes(dag.task_group, dot, states_by_task_id)
for edge in dag_edges(dag):
# Gets an optional label for the edge; this will be None if none is specified.
label = dag.get_edge_info(edge["source_id"], edge["target_id"]).get("label")
# Add the edge to the graph with optional label
# (we can just use the maybe-None label variable directly)
dot.edge(edge["source_id"], edge["target_id"], label)
return dot |
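Minimal usage sketch: build a two-task DAG and export its graph. Without task instances, the nodes fall back to each operator's ui_color/ui_fgcolor:

import pendulum
from airflow.models.dag import DAG
from airflow.operators.empty import EmptyOperator

with DAG(dag_id="demo", schedule=None, start_date=pendulum.datetime(2024, 1, 1, tz="UTC")) as dag:
    EmptyOperator(task_id="start") >> EmptyOperator(task_id="end")

dot = render_dag(dag)
dot.render("demo_dag", format="svg", cleanup=True)  # writes demo_dag.svg; needs graphviz binaries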
Create an EdgeModifier that sets a human-readable label on the edge. | def Label(label: str):
"""Create an EdgeModifier that sets a human-readable label on the edge."""
return EdgeModifier(label=label) |
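Typical usage in a DAG file, annotating an edge shown in the Graph view (EdgeModifier comes from airflow.utils.edgemodifier):

import pendulum
from airflow.models.dag import DAG
from airflow.operators.empty import EmptyOperator

with DAG(dag_id="labels_demo", schedule=None, start_date=pendulum.datetime(2024, 1, 1, tz="UTC")):
    check = EmptyOperator(task_id="check")
    notify = EmptyOperator(task_id="notify")
    check >> Label("only on weekdays") >> notify  # the label appears on this edge in the Graph view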
Send an email using the backend specified in the *EMAIL_BACKEND* configuration option.
:param to: A list or iterable of email addresses to send the email to.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to attach to the email.
:param dryrun: If *True*, the email will not actually be sent. Default: *False*.
:param cc: A string or iterable of strings containing email addresses to send a copy of the email to.
:param bcc: A string or iterable of strings containing email addresses to send a
blind carbon copy of the email to.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param conn_id: The connection ID to use for the backend. If not provided, the default connection
specified in the *EMAIL_CONN_ID* configuration option will be used.
:param custom_headers: A dictionary of additional headers to add to the MIME message.
No validations are run on these values, and they should be able to be encoded.
:param kwargs: Additional keyword arguments to pass to the backend. | def send_email(
to: list[str] | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
dryrun: bool = False,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""
Send an email using the backend specified in the *EMAIL_BACKEND* configuration option.
:param to: A list or iterable of email addresses to send the email to.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to attach to the email.
:param dryrun: If *True*, the email will not actually be sent. Default: *False*.
:param cc: A string or iterable of strings containing email addresses to send a copy of the email to.
:param bcc: A string or iterable of strings containing email addresses to send a
blind carbon copy of the email to.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param conn_id: The connection ID to use for the backend. If not provided, the default connection
specified in the *EMAIL_CONN_ID* configuration option will be used.
:param custom_headers: A dictionary of additional headers to add to the MIME message.
No validations are run on these values, and they should be able to be encoded.
:param kwargs: Additional keyword arguments to pass to the backend.
"""
backend = conf.getimport("email", "EMAIL_BACKEND")
backend_conn_id = conn_id or conf.get("email", "EMAIL_CONN_ID")
from_email = conf.get("email", "from_email", fallback=None)
to_list = get_email_address_list(to)
to_comma_separated = ", ".join(to_list)
return backend(
to_comma_separated,
subject,
html_content,
files=files,
dryrun=dryrun,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
conn_id=backend_conn_id,
from_email=from_email,
custom_headers=custom_headers,
**kwargs,
) |
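A hedged usage sketch; the backend, connection ID, and sender all come from the [email] section of airflow.cfg (for example email_backend = airflow.utils.email.send_email_smtp), and the addresses below are placeholders:

send_email(
    to=["[email protected]"],
    subject="Daily report",
    html_content="<h3>All tasks succeeded</h3>",
    cc="[email protected]",
    custom_headers={"X-Team": "data-platform"},  # forwarded to the backend unvalidated
)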