response (stringlengths 1 to 33.1k) | instruction (stringlengths 22 to 582k) |
---|---|
List all hooks at the command line. | def hooks_list(args):
"""List all hooks at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].hook_class_name if x[1] else ERROR_IMPORTING_HOOK,
"conn_id_attribute_name": x[1].connection_id_attribute_name if x[1] else ERROR_IMPORTING_HOOK,
"package_name": x[1].package_name if x[1] else ERROR_IMPORTING_HOOK,
"hook_name": x[1].hook_name if x[1] else ERROR_IMPORTING_HOOK,
},
) |
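The provider listing commands in this section all share one shape: take an iterable from `ProvidersManager`, flatten each item into a `dict` of column names to values with a `mapper` lambda, and hand the rows to `AirflowConsole().print_as` for rendering. A minimal self-contained sketch of that flattening step, using hypothetical data rather than the real `ProvidersManager` output:

```python
# Hypothetical stand-in for ProvidersManager().hooks: connection_type -> hook class
# path (None models a hook that failed to import).
hooks = {
    "sqlite": "airflow.providers.sqlite.hooks.sqlite.SqliteHook",
    "broken": None,
}
ERROR_IMPORTING_HOOK = "<error importing hook>"

# The mapper turns each (key, value) item into a flat row dict, which is the shape
# AirflowConsole needs to render a table, JSON, or YAML.
rows = [
    {"connection_type": conn_type, "class": cls or ERROR_IMPORTING_HOOK}
    for conn_type, cls in sorted(hooks.items())
]
for row in rows:
    print(row)
```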
List all custom connection form fields at the command line. | def connection_form_widget_list(args):
"""List all custom connection form fields at the command line."""
AirflowConsole().print_as(
data=sorted(ProvidersManager().connection_form_widgets.items()),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].hook_class_name,
"package_name": x[1].package_name,
"field_type": x[1].field.field_class.__name__,
},
) |
List field behaviours. | def connection_field_behaviours(args):
"""List field behaviours."""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
) |
List all extra links at the command line. | def extra_links_list(args):
"""List all extra links at the command line."""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
) |
List all log task handlers at the command line. | def logging_list(args):
"""List all log task handlers at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().logging_class_names),
output=args.output,
mapper=lambda x: {
"logging_class_name": x,
},
) |
List all secrets backends at the command line. | def secrets_backends_list(args):
"""List all secrets backends at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().secrets_backend_class_names),
output=args.output,
mapper=lambda x: {
"secrets_backend_class_name": x,
},
) |
List all API auth backend modules at the command line. | def auth_backend_list(args):
"""List all API auth backend modules at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().auth_backend_module_names),
output=args.output,
mapper=lambda x: {
"api_auth_backend_module": x,
},
) |
List all auth managers at the command line. | def auth_managers_list(args):
"""List all auth managers at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().auth_managers),
output=args.output,
mapper=lambda x: {
"auth_managers_module": x,
},
) |
List all executors at the command line. | def executors_list(args):
"""List all executors at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().executor_class_names),
output=args.output,
mapper=lambda x: {
"executor_class_names": x,
},
) |
List all configurations at the command line. | def config_list(args):
"""List all configurations at the command line."""
AirflowConsole().print_as(
data=list(ProvidersManager().provider_configs),
output=args.output,
mapper=lambda x: {
"provider_config": x,
},
) |
Informs if the providers manager has been initialized too early.
If it has been initialized, shows the stack trace and exits with error code 1. | def lazy_loaded(args):
"""Informs if the providers manager has been initialized too early.
If it has been initialized, shows the stack trace and exits with error code 1.
"""
import rich
if ProvidersManager.initialized():
rich.print(
"\n[red]ProvidersManager was initialized during CLI parsing. This should not happen.\n",
file=sys.stderr,
)
rich.print(
"\n[yellow]Please make sure no Providers Manager initialization happens during CLI parsing.\n",
file=sys.stderr,
)
rich.print("Stack trace where it has been initialized:\n", file=sys.stderr)
rich.print(ProvidersManager.initialization_stack_trace(), file=sys.stderr)
sys.exit(1)
else:
rich.print("[green]All ok. Providers Manager was not initialized during the CLI parsing.")
sys.exit(0) |
Rotates all encrypted connection credentials and variables. | def rotate_fernet_key(args):
"""Rotates all encrypted connection credentials and variables."""
with create_session() as session:
conns_query = select(Connection).where(Connection.is_encrypted | Connection.is_extra_encrypted)
for conn in session.scalars(conns_query):
conn.rotate_fernet_key()
for var in session.scalars(select(Variable).where(Variable.is_encrypted)):
var.rotate_fernet_key()
for trigger in session.scalars(select(Trigger)):
trigger.rotate_fernet_key() |
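Each `rotate_fernet_key()` call above re-encrypts the stored secret so that the newest Fernet key can decrypt it. A self-contained sketch of what that rotation amounts to, using the `cryptography` package with freshly generated keys (illustrative keys, not Airflow's configured `fernet_key`):

```python
from cryptography.fernet import Fernet, MultiFernet

old_key = Fernet(Fernet.generate_key())
new_key = Fernet(Fernet.generate_key())

# A secret encrypted under the old key...
token = old_key.encrypt(b"my-connection-password")

# ...is re-encrypted so the first (newest) key in the chain can decrypt it.
rotated = MultiFernet([new_key, old_key]).rotate(token)
assert new_key.decrypt(rotated) == b"my-connection-password"
print("rotation ok")
```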
Start Airflow Scheduler. | def scheduler(args: Namespace):
"""Start Airflow Scheduler."""
print(settings.HEADER)
run_command_with_daemon_option(
args=args,
process_name="scheduler",
callback=lambda: _run_scheduler_job(args),
should_setup_logging=True,
) |
Start serve_logs sub-process. | def _serve_logs(skip_serve_logs: bool = False):
"""Start serve_logs sub-process."""
from airflow.utils.serve_logs import serve_logs
sub_proc = None
executor_class, _ = ExecutorLoader.import_default_executor_cls()
if executor_class.serve_logs:
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
try:
yield
finally:
if sub_proc:
sub_proc.terminate() |
Start serve_health_check sub-process. | def _serve_health_check(enable_health_check: bool = False):
"""Start serve_health_check sub-process."""
sub_proc = None
if enable_health_check:
sub_proc = Process(target=serve_health_check)
sub_proc.start()
try:
yield
finally:
if sub_proc:
sub_proc.terminate() |
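Both `_serve_logs` and `_serve_health_check` follow the same pattern: optionally start a helper `multiprocessing.Process`, yield control back to the caller, and always terminate the child on exit. A minimal standalone sketch of that pattern (the `_dummy_server` target and `managed_subprocess` name are illustrative, not Airflow APIs):

```python
from __future__ import annotations

import time
from contextlib import contextmanager
from multiprocessing import Process


def _dummy_server() -> None:
    # Stand-in for serve_logs / serve_health_check; a real deployment would
    # run a small HTTP server here.
    while True:
        time.sleep(1)


@contextmanager
def managed_subprocess(skip: bool = False):
    """Run a helper sub-process for the lifetime of the block, then terminate it."""
    proc = None if skip else Process(target=_dummy_server, daemon=True)
    if proc:
        proc.start()
    try:
        yield proc
    finally:
        if proc:
            proc.terminate()
            proc.join()


if __name__ == "__main__":
    with managed_subprocess() as proc:
        print("helper pid:", proc.pid if proc else None)
```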
Generate a ``run_id`` for a DAG run that will be created temporarily.
This is used mostly by ``airflow task test`` to create a DAG run that will
be deleted after the task is run. | def _generate_temporary_run_id() -> str:
"""Generate a ``run_id`` for a DAG run that will be created temporarily.
This is used mostly by ``airflow task test`` to create a DAG run that will
be deleted after the task is run.
"""
return f"__airflow_temporary_run_{timezone.utcnow().isoformat()}__" |
Try to retrieve a DAG run from a string representing either a run ID or logical date.
This checks DAG runs like this:
1. If the input ``exec_date_or_run_id`` matches a DAG run ID, return the run.
2. Try to parse the input as a date. If that works, and the resulting
date matches a DAG run's logical date, return the run.
3. If ``create_if_necessary`` is *False* and the input works for neither of
the above, raise ``DagRunNotFound``.
4. Try to create a new DAG run. If the input looks like a date, use it as
the logical date; otherwise use it as a run ID and set the logical date
to the current time. | def _get_dag_run(
*,
dag: DAG,
create_if_necessary: CreateIfNecessary,
exec_date_or_run_id: str | None = None,
session: Session | None = None,
) -> tuple[DagRun | DagRunPydantic, bool]:
"""Try to retrieve a DAG run from a string representing either a run ID or logical date.
This checks DAG runs like this:
1. If the input ``exec_date_or_run_id`` matches a DAG run ID, return the run.
2. Try to parse the input as a date. If that works, and the resulting
date matches a DAG run's logical date, return the run.
3. If ``create_if_necessary`` is *False* and the input works for neither of
the above, raise ``DagRunNotFound``.
4. Try to create a new DAG run. If the input looks like a date, use it as
the logical date; otherwise use it as a run ID and set the logical date
to the current time.
"""
if not exec_date_or_run_id and not create_if_necessary:
raise ValueError("Must provide `exec_date_or_run_id` if not `create_if_necessary`.")
execution_date: pendulum.DateTime | None = None
if exec_date_or_run_id:
dag_run = DAG.fetch_dagrun(dag_id=dag.dag_id, run_id=exec_date_or_run_id, session=session)
if dag_run:
return dag_run, False
with suppress(ParserError, TypeError):
execution_date = timezone.parse(exec_date_or_run_id)
if execution_date:
dag_run = DAG.fetch_dagrun(dag_id=dag.dag_id, execution_date=execution_date, session=session)
if dag_run:
return dag_run, False
elif not create_if_necessary:
raise DagRunNotFound(
f"DagRun for {dag.dag_id} with run_id or execution_date "
f"of {exec_date_or_run_id!r} not found"
)
if execution_date is not None:
dag_run_execution_date = execution_date
else:
dag_run_execution_date = pendulum.instance(timezone.utcnow())
if create_if_necessary == "memory":
dag_run = DagRun(
dag_id=dag.dag_id,
run_id=exec_date_or_run_id,
execution_date=dag_run_execution_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=dag_run_execution_date),
)
return dag_run, True
elif create_if_necessary == "db":
dag_run = dag.create_dagrun(
state=DagRunState.QUEUED,
execution_date=dag_run_execution_date,
run_id=_generate_temporary_run_id(),
data_interval=dag.timetable.infer_manual_data_interval(run_after=dag_run_execution_date),
session=session,
)
return dag_run, True
raise ValueError(f"unknown create_if_necessary value: {create_if_necessary!r}") |
Get the task instance through DagRun.run_id, if that fails, get the TI the old way. | def _get_ti_db_access(
dag: DAG,
task: Operator,
map_index: int,
*,
exec_date_or_run_id: str | None = None,
pool: str | None = None,
create_if_necessary: CreateIfNecessary = False,
session: Session = NEW_SESSION,
) -> tuple[TaskInstance | TaskInstancePydantic, bool]:
"""Get the task instance through DagRun.run_id, if that fails, get the TI the old way."""
# this check is imperfect because diff dags could have tasks with same name
# but in a task, dag_id is a property that accesses its dag, and we don't
# currently include the dag when serializing an operator
if task.task_id not in dag.task_dict:
raise ValueError(f"Provided task {task.task_id} is not in dag '{dag.dag_id}.")
if not exec_date_or_run_id and not create_if_necessary:
raise ValueError("Must provide `exec_date_or_run_id` if not `create_if_necessary`.")
if needs_expansion(task):
if map_index < 0:
raise RuntimeError("No map_index passed to mapped task")
elif map_index >= 0:
raise RuntimeError("map_index passed to non-mapped task")
dag_run, dr_created = _get_dag_run(
dag=dag,
exec_date_or_run_id=exec_date_or_run_id,
create_if_necessary=create_if_necessary,
session=session,
)
ti_or_none = dag_run.get_task_instance(task.task_id, map_index=map_index, session=session)
ti: TaskInstance | TaskInstancePydantic
if ti_or_none is None:
if not create_if_necessary:
raise TaskInstanceNotFound(
f"TaskInstance for {dag.dag_id}, {task.task_id}, map={map_index} with "
f"run_id or execution_date of {exec_date_or_run_id!r} not found"
)
# TODO: Validate map_index is in range?
ti = TaskInstance(task, run_id=dag_run.run_id, map_index=map_index)
ti.dag_run = dag_run
else:
ti = ti_or_none
ti.refresh_from_task(task, pool_override=pool)
return ti, dr_created |
Run the task based on a mode.
Any of the 3 modes are available:
- using LocalTaskJob
- as raw task
- by executor | def _run_task_by_selected_method(
args, dag: DAG, ti: TaskInstance | TaskInstancePydantic
) -> None | TaskReturnCode:
"""
Run the task based on a mode.
Any of the 3 modes are available:
- using LocalTaskJob
- as raw task
- by executor
"""
if TYPE_CHECKING:
assert not isinstance(ti, TaskInstancePydantic) # Wait for AIP-44 implementation to complete
if args.local:
return _run_task_by_local_task_job(args, ti)
if args.raw:
return _run_raw_task(args, ti)
_run_task_by_executor(args, dag, ti)
return None |
Send the task to the executor for execution.
This can result in the task being started by another host if the executor implementation supports it. | def _run_task_by_executor(args, dag: DAG, ti: TaskInstance) -> None:
"""
Send the task to the executor for execution.
This can result in the task being started by another host if the executor implementation supports it.
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f"Pickled dag {dag} as pickle_id: {pickle_id}")
except Exception as e:
print("Could not pickle the DAG")
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.job_id = None
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=should_ignore_depends_on_past(args),
wait_for_past_depends_before_skipping=(args.depends_on_past == "wait"),
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end() |
Run LocalTaskJob, which monitors the raw task execution process. | def _run_task_by_local_task_job(args, ti: TaskInstance | TaskInstancePydantic) -> TaskReturnCode | None:
"""Run LocalTaskJob, which monitors the raw task execution process."""
if InternalApiConfig.get_use_internal_api():
from airflow.models.renderedtifields import RenderedTaskInstanceFields # noqa: F401
from airflow.models.trigger import Trigger # noqa: F401
job_runner = LocalTaskJobRunner(
job=Job(dag_id=ti.dag_id),
task_instance=ti,
mark_success=args.mark_success,
pickle_id=args.pickle,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=should_ignore_depends_on_past(args),
wait_for_past_depends_before_skipping=(args.depends_on_past == "wait"),
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
external_executor_id=_extract_external_executor_id(args),
)
try:
ret = run_job(job=job_runner.job, execute_callable=job_runner._execute)
finally:
if args.shut_down_logging:
logging.shutdown()
with suppress(ValueError):
return TaskReturnCode(ret)
return None |
Run the main task handling code. | def _run_raw_task(args, ti: TaskInstance) -> None | TaskReturnCode:
"""Run the main task handling code."""
return ti._run_raw_task(
mark_success=args.mark_success,
job_id=args.job_id,
pool=args.pool,
) |
Move handlers for task logging to root logger.
We want anything logged during task run to be propagated to task log handlers.
If running in a k8s executor pod, also keep the stream handler on root logger
so that logs are still emitted to stdout. | def _move_task_handlers_to_root(ti: TaskInstance | TaskInstancePydantic) -> Generator[None, None, None]:
"""
Move handlers for task logging to root logger.
We want anything logged during task run to be propagated to task log handlers.
If running in a k8s executor pod, also keep the stream handler on root logger
so that logs are still emitted to stdout.
"""
# nothing to do
if not ti.log.handlers or settings.DONOT_MODIFY_HANDLERS:
yield
return
# Move task handlers to root and reset task logger and restore original logger settings after exit.
# If k8s executor, we need to ensure that root logger has a console handler, so that
# task logs propagate to stdout (this is how webserver retrieves them while task is running).
root_logger = logging.getLogger()
console_handler = next((h for h in root_logger.handlers if h.name == "console"), None)
with LoggerMutationHelper(root_logger), LoggerMutationHelper(ti.log) as task_helper:
task_helper.move(root_logger)
if IS_K8S_EXECUTOR_POD or IS_EXECUTOR_CONTAINER:
if console_handler and console_handler not in root_logger.handlers:
root_logger.addHandler(console_handler)
yield |
Redirect stdout to ti logger.
Redirect stdout and stderr to the task instance log as INFO and WARNING
level messages, respectively.
If stdout already redirected (possible when task running with option
`--local`), don't redirect again. | def _redirect_stdout_to_ti_log(ti: TaskInstance | TaskInstancePydantic) -> Generator[None, None, None]:
"""
Redirect stdout to ti logger.
Redirect stdout and stderr to the task instance log as INFO and WARNING
level messages, respectively.
If stdout already redirected (possible when task running with option
`--local`), don't redirect again.
"""
# if sys.stdout is StreamLogWriter, it means we already redirected
# likely before forking in LocalTaskJob
if not isinstance(sys.stdout, StreamLogWriter):
info_writer = StreamLogWriter(ti.log, logging.INFO)
warning_writer = StreamLogWriter(ti.log, logging.WARNING)
with redirect_stdout(info_writer), redirect_stderr(warning_writer):
yield
else:
yield |
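A self-contained sketch of the redirection trick used by `_redirect_stdout_to_ti_log`: a minimal file-like writer forwards anything printed to a logger, and the `isinstance` check avoids redirecting twice. `LogWriter` is a hypothetical stand-in for Airflow's `StreamLogWriter`:

```python
from __future__ import annotations

import logging
import sys
from contextlib import redirect_stderr, redirect_stdout

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
log = logging.getLogger("task")


class LogWriter:
    """Minimal file-like object that forwards writes to a logger."""

    def __init__(self, logger: logging.Logger, level: int) -> None:
        self.logger, self.level = logger, level

    def write(self, message: str) -> None:
        if message.strip():  # skip the bare newlines that print() emits
            self.logger.log(self.level, message.rstrip())

    def flush(self) -> None:  # required by the file protocol
        pass


if not isinstance(sys.stdout, LogWriter):  # don't redirect twice
    with redirect_stdout(LogWriter(log, logging.INFO)), redirect_stderr(LogWriter(log, logging.WARNING)):
        print("goes to the log as INFO")
        print("goes to the log as WARNING", file=sys.stderr)
```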
Run a single task instance.
Note that there must be at least one DagRun for this to start,
i.e. it must have been scheduled and/or triggered previously.
Alternatively, if you just need to run it for testing then use
"airflow tasks test ..." command instead. | def task_run(args, dag: DAG | None = None) -> TaskReturnCode | None:
"""
Run a single task instance.
Note that there must be at least one DagRun for this to start,
i.e. it must have been scheduled and/or triggered previously.
Alternatively, if you just need to run it for testing then use
"airflow tasks test ..." command instead.
"""
# Load custom airflow config
if args.local and args.raw:
raise AirflowException(
"Option --raw and --local are mutually exclusive. "
"Please remove one option to execute the command."
)
if args.raw:
unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]
if unsupported_options:
unsupported_raw_task_flags = ", ".join(f"--{o}" for o in RAW_TASK_UNSUPPORTED_OPTION)
unsupported_flags = ", ".join(f"--{o}" for o in unsupported_options)
raise AirflowException(
"Option --raw does not work with some of the other options on this command. "
"You can't use --raw option and the following options: "
f"{unsupported_raw_task_flags}. "
f"You provided the option {unsupported_flags}. "
"Delete it to execute the command."
)
if dag and args.pickle:
raise AirflowException("You cannot use the --pickle option when using DAG.cli() method.")
if args.cfg_path:
with open(args.cfg_path) as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
settings.MASK_SECRETS_IN_LOGS = True
get_listener_manager().hook.on_starting(component=TaskCommandMarker())
if args.pickle:
print(f"Loading pickle id: {args.pickle}")
_dag = get_dag_by_pickle(args.pickle)
elif not dag:
_dag = get_dag(args.subdir, args.dag_id, args.read_from_db)
else:
_dag = dag
task = _dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, pool=args.pool)
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
log.info("Running %s on host %s", ti, hostname)
# IMPORTANT, have to re-configure ORM with the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
# this should be last thing before running, to reduce likelihood of an open session
# which can cause trouble if running process in a fork.
settings.reconfigure_orm(disable_connection_pool=True)
task_return_code = None
try:
if args.interactive:
task_return_code = _run_task_by_selected_method(args, _dag, ti)
else:
with _move_task_handlers_to_root(ti), _redirect_stdout_to_ti_log(ti):
task_return_code = _run_task_by_selected_method(args, _dag, ti)
if task_return_code == TaskReturnCode.DEFERRED:
_set_task_deferred_context_var()
finally:
try:
get_listener_manager().hook.before_stopping(component=TaskCommandMarker())
except Exception:
pass
return task_return_code |
Get task instance dependencies that were not met.
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es). | def task_failed_deps(args) -> None:
"""
Get task instance dependencies that were not met.
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id)
# task_failed_deps is executed with access to the database.
if isinstance(ti, TaskInstancePydantic):
raise ValueError("not a TaskInstance")
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.") |
Return the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success | def task_state(args) -> None:
"""
Return the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id)
# task_state is executed with access to the database.
if isinstance(ti, TaskInstancePydantic):
raise ValueError("not a TaskInstance")
print(ti.current_state()) |
List the tasks within a DAG at the command line. | def task_list(args, dag: DAG | None = None) -> None:
"""List the tasks within a DAG at the command line."""
dag = dag or get_dag(args.subdir, args.dag_id)
if args.tree:
dag.tree_view()
else:
tasks = sorted(t.task_id for t in dag.tasks)
print("\n".join(tasks)) |
Try to guess the debugger used by the user.
When it doesn't find any user-installed debugger, returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__ | def _guess_debugger() -> _SupportedDebugger:
"""
Try to guess the debugger used by the user.
When it doesn't find any user-installed debugger, returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
exc: Exception
for mod_name in SUPPORTED_DEBUGGER_MODULES:
try:
return cast(_SupportedDebugger, importlib.import_module(mod_name))
except ImportError as e:
exc = e
raise exc |
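The guessing logic boils down to importing candidate modules in priority order and re-raising the last `ImportError` only if every candidate fails; since `pdb` ships with the standard library, the fallback always succeeds. A standalone sketch (the candidate list mirrors the supported debuggers above):

```python
from __future__ import annotations

import importlib
from types import ModuleType

# Most feature-rich candidates first; "pdb" always imports, so it is the fallback.
CANDIDATES = ["pudb", "web_pdb", "ipdb", "pdb"]


def guess_debugger() -> ModuleType:
    """Return the first importable debugger module; re-raise the last failure otherwise."""
    exc: Exception | None = None
    for name in CANDIDATES:
        try:
            return importlib.import_module(name)
        except ImportError as e:
            exc = e
    assert exc is not None  # unreachable in practice because pdb always imports
    raise exc


print(guess_debugger().__name__)  # "pdb" unless pudb/web_pdb/ipdb is installed
```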
Get the status of all task instances in a DagRun. | def task_states_for_dag_run(args, session: Session = NEW_SESSION) -> None:
"""Get the status of all task instances in a DagRun."""
dag_run = session.scalar(
select(DagRun).where(DagRun.run_id == args.execution_date_or_run_id, DagRun.dag_id == args.dag_id)
)
if not dag_run:
try:
execution_date = timezone.parse(args.execution_date_or_run_id)
dag_run = session.scalar(
select(DagRun).where(DagRun.execution_date == execution_date, DagRun.dag_id == args.dag_id)
)
except (ParserError, TypeError) as err:
raise AirflowException(f"Error parsing the supplied execution_date. Error: {err}")
if dag_run is None:
raise DagRunNotFound(
f"DagRun for {args.dag_id} with run_id or execution_date of {args.execution_date_or_run_id!r} "
"not found"
)
has_mapped_instances = any(ti.map_index >= 0 for ti in dag_run.task_instances)
def format_task_instance(ti: TaskInstance) -> dict[str, str]:
data = {
"dag_id": ti.dag_id,
"execution_date": dag_run.execution_date.isoformat(),
"task_id": ti.task_id,
"state": ti.state,
"start_date": ti.start_date.isoformat() if ti.start_date else "",
"end_date": ti.end_date.isoformat() if ti.end_date else "",
}
if has_mapped_instances:
data["map_index"] = str(ti.map_index) if ti.map_index >= 0 else ""
return data
AirflowConsole().print_as(data=dag_run.task_instances, output=args.output, mapper=format_task_instance) |
Test task for a given dag_id. | def task_test(args, dag: DAG | None = None, session: Session = NEW_SESSION) -> None:
"""Test task for a given dag_id."""
# We want to log output from operators etc to show up here. Normally
# airflow.task would redirect to a file, but here we want it to propagate
# up to the normal airflow handler.
settings.MASK_SECRETS_IN_LOGS = True
handlers = logging.getLogger("airflow.task").handlers
already_has_stream_handler = False
for handler in handlers:
already_has_stream_handler = isinstance(handler, logging.StreamHandler)
if already_has_stream_handler:
break
if not already_has_stream_handler:
logging.getLogger("airflow.task").propagate = True
env_vars = {"AIRFLOW_TEST_MODE": "True"}
if args.env_vars:
env_vars.update(args.env_vars)
os.environ.update(env_vars)
dag = dag or get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
task.params.update(passed_in_params)
if task.params and isinstance(task.params, ParamsDict):
task.params.validate()
ti, dr_created = _get_ti(
task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, create_if_necessary="db"
)
# task_test is executed with access to the database.
if isinstance(ti, TaskInstancePydantic):
raise ValueError("not a TaskInstance")
try:
with redirect_stdout(RedactedIO()):
if args.dry_run:
ti.dry_run()
else:
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True, raise_on_defer=True)
except TaskDeferred as defer:
ti.defer_task(defer=defer, session=session)
log.info("[TASK TEST] running trigger in line")
event = _run_inline_trigger(defer.trigger)
ti.next_method = defer.method_name
ti.next_kwargs = {"event": event.payload} if event else defer.kwargs
execute_callable = getattr(task, ti.next_method)
if ti.next_kwargs:
execute_callable = functools.partial(execute_callable, **ti.next_kwargs)
context = ti.get_template_context(ignore_param_exceptions=False)
execute_callable(context)
log.info("[TASK TEST] Trigger completed")
except Exception:
if args.post_mortem:
debugger = _guess_debugger()
debugger.post_mortem()
else:
raise
finally:
if not already_has_stream_handler:
# Make sure to reset back to normal. When run for CLI this doesn't
# matter, but it does for test suite
logging.getLogger("airflow.task").propagate = False
if dr_created:
with create_session() as session:
session.delete(ti.dag_run) |
Render and display templated fields for a given task. | def task_render(args, dag: DAG | None = None) -> None:
"""Render and display templated fields for a given task."""
if not dag:
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(
task, args.map_index, exec_date_or_run_id=args.execution_date_or_run_id, create_if_necessary="memory"
)
# task_render is executed with access to the database.
if isinstance(ti, TaskInstancePydantic):
raise ValueError("not a TaskInstance")
with create_session() as session, set_current_task_instance_session(session=session):
ti.render_templates()
for attr in task.template_fields:
print(
textwrap.dedent(
f""" # ----------------------------------------------------------
# property: {attr}
# ----------------------------------------------------------
{getattr(ti.task, attr)}
"""
)
) |
Clear all task instances or only those matched by regex for a DAG(s). | def task_clear(args) -> None:
"""Clear all task instances or only those matched by regex for a DAG(s)."""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:
dags = [get_dag_by_file_location(args.dag_id)]
else:
# todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.partial_subset(
task_ids_or_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.yes,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
) |
Start serve_logs sub-process. | def _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:
"""Start serve_logs sub-process."""
sub_proc = None
if skip_serve_logs is False:
port = conf.getint("logging", "trigger_log_server_port", fallback=8794)
sub_proc = Process(target=partial(serve_logs, port=port))
sub_proc.start()
try:
yield
finally:
if sub_proc:
sub_proc.terminate() |
Start Airflow Triggerer. | def triggerer(args):
"""Start Airflow Triggerer."""
settings.MASK_SECRETS_IN_LOGS = True
print(settings.HEADER)
triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
run_command_with_daemon_option(
args=args,
process_name="triggerer",
callback=lambda: triggerer_run(args.skip_serve_logs, args.capacity, triggerer_heartrate),
should_setup_logging=True,
) |
Display all the variables. | def variables_list(args):
"""Display all the variables."""
with create_session() as session:
variables = session.scalars(select(Variable)).all()
AirflowConsole().print_as(data=variables, output=args.output, mapper=lambda x: {"key": x.key}) |
Display variable by a given name. | def variables_get(args):
"""Display variable by a given name."""
try:
if args.default is None:
var = Variable.get(args.key, deserialize_json=args.json)
print(var)
else:
var = Variable.get(args.key, deserialize_json=args.json, default_var=args.default)
print(var)
except (ValueError, KeyError) as e:
raise SystemExit(str(e).strip("'\"")) |
Create new variable with a given name, value and description. | def variables_set(args):
"""Create new variable with a given name, value and description."""
Variable.set(args.key, args.value, args.description, serialize_json=args.json)
print(f"Variable {args.key} created") |
Delete variable by a given name. | def variables_delete(args):
"""Delete variable by a given name."""
Variable.delete(args.key)
print(f"Variable {args.key} deleted") |
Import variables from a given file. | def variables_import(args, session):
"""Import variables from a given file."""
if not os.path.exists(args.file):
raise SystemExit("Missing variables file.")
with open(args.file) as varfile:
try:
var_json = json.load(varfile)
except JSONDecodeError:
raise SystemExit("Invalid variables file.")
suc_count = fail_count = 0
skipped = set()
action_on_existing = args.action_on_existing_key
existing_keys = set()
if action_on_existing != "overwrite":
existing_keys = set(session.scalars(select(Variable.key).where(Variable.key.in_(var_json))))
if action_on_existing == "fail" and existing_keys:
raise SystemExit(f"Failed. These keys: {sorted(existing_keys)} already exists.")
for k, v in var_json.items():
if action_on_existing == "skip" and k in existing_keys:
skipped.add(k)
continue
try:
Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e:
print(f"Variable import failed: {e!r}")
fail_count += 1
else:
suc_count += 1
print(f"{suc_count} of {len(var_json)} variables successfully updated.")
if fail_count:
print(f"{fail_count} variable(s) failed to be updated.")
if skipped:
print(
f"The variables with these keys: {list(sorted(skipped))} "
f"were skipped because they already exists"
) |
Export all the variables to the file. | def variables_export(args):
"""Export all the variables to the file."""
var_dict = {}
with create_session() as session:
qry = session.scalars(select(Variable))
data = json.JSONDecoder()
for var in qry:
try:
val = data.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
with args.file as varfile:
json.dump(var_dict, varfile, sort_keys=True, indent=4)
print_export_output("Variables", var_dict, varfile) |
Display Airflow version at the command line. | def version(args):
"""Display Airflow version at the command line."""
print(airflow.__version__) |
Start Airflow Webserver. | def webserver(args):
"""Start Airflow Webserver."""
print(settings.HEADER)
# Check for old/insecure config, and fail safe (i.e. don't launch) if the config is wildly insecure.
if conf.get("webserver", "secret_key") == "temporary_key":
from rich import print as rich_print
rich_print(
"[red][bold]ERROR:[/bold] The `secret_key` setting under the webserver config has an insecure "
"value - Airflow has failed safe and refuses to start. Please change this value to a new, "
"per-environment, randomly generated string, for example using this command `[cyan]openssl rand "
"-hex 30[/cyan]`",
file=sys.stderr,
)
sys.exit(1)
access_logfile = args.access_logfile or conf.get("webserver", "access_logfile")
error_logfile = args.error_logfile or conf.get("webserver", "error_logfile")
access_logformat = args.access_logformat or conf.get("webserver", "access_logformat")
num_workers = args.workers or conf.get("webserver", "workers")
worker_timeout = args.worker_timeout or conf.get("webserver", "web_server_worker_timeout")
ssl_cert = args.ssl_cert or conf.get("webserver", "web_server_ssl_cert")
ssl_key = args.ssl_key or conf.get("webserver", "web_server_ssl_key")
if not ssl_cert and ssl_key:
raise AirflowException("An SSL certificate must also be provided for use with " + ssl_key)
if ssl_cert and not ssl_key:
raise AirflowException("An SSL key must also be provided for use with " + ssl_cert)
from airflow.www.app import create_app
if args.debug:
print(f"Starting the web server on port {args.port} and host {args.hostname}.")
app = create_app(testing=conf.getboolean("core", "unit_test_mode"))
app.run(
debug=True,
use_reloader=not app.config["TESTING"],
port=args.port,
host=args.hostname,
ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
)
else:
print(
textwrap.dedent(
f"""\
Running the Gunicorn Server with:
Workers: {num_workers} {args.workerclass}
Host: {args.hostname}:{args.port}
Timeout: {worker_timeout}
Logfiles: {access_logfile} {error_logfile}
Access Logformat: {access_logformat}
================================================================="""
)
)
pid_file, _, _, _ = setup_locations("webserver", pid=args.pid)
run_args = [
sys.executable,
"-m",
"gunicorn",
"--workers",
str(num_workers),
"--worker-class",
str(args.workerclass),
"--timeout",
str(worker_timeout),
"--bind",
args.hostname + ":" + str(args.port),
"--name",
"airflow-webserver",
"--pid",
pid_file,
"--config",
"python:airflow.www.gunicorn_config",
]
if args.access_logfile:
run_args += ["--access-logfile", str(args.access_logfile)]
if args.error_logfile:
run_args += ["--error-logfile", str(args.error_logfile)]
if args.access_logformat and args.access_logformat.strip():
run_args += ["--access-logformat", str(args.access_logformat)]
if args.daemon:
run_args += ["--daemon"]
if ssl_cert:
run_args += ["--certfile", ssl_cert, "--keyfile", ssl_key]
run_args += ["airflow.www.app:cached_app()"]
if conf.getboolean("webserver", "reload_on_plugin_change", fallback=False):
log.warning(
"Setting reload_on_plugin_change = true prevents running Gunicorn with preloading. "
"This means the app cannot be loaded before workers are forked, and each worker has a "
"separate copy of the app. This may cause IntegrityError during webserver startup, and "
"should be avoided in production."
)
else:
# To prevent different workers creating the web app and
# all writing to the database at the same time, we use the --preload option.
run_args += ["--preload"]
def kill_proc(signum: int, gunicorn_master_proc: psutil.Process | subprocess.Popen) -> NoReturn:
log.info("Received signal: %s. Closing gunicorn.", signum)
gunicorn_master_proc.terminate()
with suppress(TimeoutError):
gunicorn_master_proc.wait(timeout=30)
if isinstance(gunicorn_master_proc, subprocess.Popen):
still_running = gunicorn_master_proc.poll() is not None
else:
still_running = gunicorn_master_proc.is_running()
if still_running:
gunicorn_master_proc.kill()
sys.exit(0)
def monitor_gunicorn(gunicorn_master_proc: psutil.Process | subprocess.Popen) -> NoReturn:
# Register signal handlers
signal.signal(signal.SIGINT, lambda signum, _: kill_proc(signum, gunicorn_master_proc))
signal.signal(signal.SIGTERM, lambda signum, _: kill_proc(signum, gunicorn_master_proc))
# These run forever until SIG{INT, TERM, KILL, ...} signal is sent
GunicornMonitor(
gunicorn_master_pid=gunicorn_master_proc.pid,
num_workers_expected=num_workers,
master_timeout=conf.getint("webserver", "web_server_master_timeout"),
worker_refresh_interval=conf.getint("webserver", "worker_refresh_interval", fallback=30),
worker_refresh_batch_size=conf.getint("webserver", "worker_refresh_batch_size", fallback=1),
reload_on_plugin_change=conf.getboolean(
"webserver", "reload_on_plugin_change", fallback=False
),
).start()
def start_and_monitor_gunicorn(args):
if args.daemon:
subprocess.Popen(run_args, close_fds=True)
# Reading pid of gunicorn master as it will be different from
# the one of the process spawned above.
gunicorn_master_proc_pid = None
while not gunicorn_master_proc_pid:
sleep(0.1)
gunicorn_master_proc_pid = read_pid_from_pidfile(pid_file)
# Run Gunicorn monitor
gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
monitor_gunicorn(gunicorn_master_proc)
else:
with subprocess.Popen(run_args, close_fds=True) as gunicorn_master_proc:
monitor_gunicorn(gunicorn_master_proc)
if args.daemon:
# This ensures possible errors are reported before daemonization
os.environ["SKIP_DAGS_PARSING"] = "True"
create_app(None)
os.environ.pop("SKIP_DAGS_PARSING")
pid_file_path = Path(pid_file)
monitor_pid_file = str(pid_file_path.with_name(f"{pid_file_path.stem}-monitor{pid_file_path.suffix}"))
run_command_with_daemon_option(
args=args,
process_name="webserver",
callback=lambda: start_and_monitor_gunicorn(args),
should_setup_logging=True,
pid_file=monitor_pid_file,
) |
Retrieve the dataset manager. | def resolve_dataset_manager() -> DatasetManager:
"""Retrieve the dataset manager."""
_dataset_manager_class = conf.getimport(
section="core",
key="dataset_manager_class",
fallback="airflow.datasets.manager.DatasetManager",
)
_dataset_manager_kwargs = conf.getjson(
section="core",
key="dataset_manager_kwargs",
fallback={},
)
return _dataset_manager_class(**_dataset_manager_kwargs) |
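`conf.getimport` resolves a dotted path from the config into a class, which is then instantiated with the JSON kwargs. A rough standalone sketch of that resolution step, with a standard-library class standing in for `airflow.datasets.manager.DatasetManager` (the `import_string` helper here is illustrative, not Airflow's):

```python
from __future__ import annotations

import importlib
from typing import Any


def import_string(dotted_path: str) -> Any:
    """Import an attribute from a dotted module path, e.g. 'json.JSONEncoder'."""
    module_path, _, attr_name = dotted_path.rpartition(".")
    return getattr(importlib.import_module(module_path), attr_name)


# Stand-ins for the config values read above.
manager_class = import_string("json.JSONEncoder")
manager_kwargs = {"indent": 2}

manager = manager_class(**manager_kwargs)
print(type(manager).__name__)  # JSONEncoder
```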
Place-hold a :class:`~urllib.parse.SplitResult` normalizer.
:meta private: | def normalize_noop(parts: SplitResult) -> SplitResult:
"""Place-hold a :class:`~urllib.parse.SplitResult` normalizer.
:meta private:
"""
return parts |
Sanitize a dataset URI.
This checks for URI validity, and normalizes the URI if needed. A fully
normalized URI is returned. | def _sanitize_uri(uri: str) -> str:
"""Sanitize a dataset URI.
This checks for URI validity, and normalizes the URI if needed. A fully
normalized URI is returned.
"""
if not uri:
raise ValueError("Dataset URI cannot be empty")
if uri.isspace():
raise ValueError("Dataset URI cannot be just whitespace")
if not uri.isascii():
raise ValueError("Dataset URI must only consist of ASCII characters")
parsed = urllib.parse.urlsplit(uri)
if not parsed.scheme and not parsed.netloc: # Does not look like a URI.
return uri
normalized_scheme = parsed.scheme.lower()
if normalized_scheme.startswith("x-"):
return uri
if normalized_scheme == "airflow":
raise ValueError("Dataset scheme 'airflow' is reserved")
_, auth_exists, normalized_netloc = parsed.netloc.rpartition("@")
if auth_exists:
# TODO: Collect this into a DagWarning.
warnings.warn(
"A dataset URI should not contain auth info (e.g. username or "
"password). It has been automatically dropped.",
UserWarning,
stacklevel=3,
)
if parsed.query:
normalized_query = urllib.parse.urlencode(sorted(urllib.parse.parse_qsl(parsed.query)))
else:
normalized_query = ""
parsed = parsed._replace(
scheme=normalized_scheme,
netloc=normalized_netloc,
path=parsed.path.rstrip("/") or "/", # Remove all trailing slashes.
query=normalized_query,
fragment="", # Ignore any fragments.
)
if (normalizer := _get_uri_normalizer(normalized_scheme)) is not None:
parsed = normalizer(parsed)
return urllib.parse.urlunsplit(parsed) |
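The normalization steps in `_sanitize_uri` can be reproduced with the standard library alone; the sketch below applies the same transformations (lowercase scheme, drop auth info, strip trailing slashes, sort the query, drop the fragment) but omits the scheme-reservation checks and the provider-specific normalizer hook:

```python
from __future__ import annotations

import urllib.parse


def normalize_uri(uri: str) -> str:
    """Apply the same normalization steps as _sanitize_uri, without the validation."""
    parsed = urllib.parse.urlsplit(uri)
    _, _, netloc = parsed.netloc.rpartition("@")  # drop any user:password@ prefix
    query = urllib.parse.urlencode(sorted(urllib.parse.parse_qsl(parsed.query)))
    parsed = parsed._replace(
        scheme=parsed.scheme.lower(),
        netloc=netloc,
        path=parsed.path.rstrip("/") or "/",
        query=query,
        fragment="",
    )
    return urllib.parse.urlunsplit(parsed)


print(normalize_uri("S3://user:[email protected]/data/?b=2&a=1#frag"))
# s3://bucket.example.com/data?a=1&b=2
```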
Coerce a user input into a sanitized URI.
If the input value is a string, it is treated as a URI and sanitized. If the
input is a :class:`Dataset`, the URI it contains is considered sanitized and
returned directly.
:meta private: | def coerce_to_uri(value: str | Dataset) -> str:
"""Coerce a user input into a sanitized URI.
If the input value is a string, it is treated as a URI and sanitized. If the
input is a :class:`Dataset`, the URI it contains is considered sanitized and
returned directly.
:meta private:
"""
if isinstance(value, Dataset):
return value.uri
return _sanitize_uri(str(value)) |
Generate unique task id given a DAG (or if run in a DAG context).
IDs are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20 | def get_unique_task_id(
task_id: str,
dag: DAG | None = None,
task_group: TaskGroup | None = None,
) -> str:
"""
Generate unique task id given a DAG (or if run in a DAG context).
IDs are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag:
return task_id
# We need to check if we are in the context of TaskGroup as the task_id may
# already be altered
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
tg_task_id = task_group.child_id(task_id) if task_group else task_id
if tg_task_id not in dag.task_ids:
return task_id
def _find_id_suffixes(dag: DAG) -> Iterator[int]:
prefix = re2.split(r"__\d+$", tg_task_id)[0]
for task_id in dag.task_ids:
match = re2.match(rf"^{prefix}__(\d+)$", task_id)
if match:
yield int(match.group(1))
yield 0 # Default if there's no matching task ID.
core = re2.split(r"__\d+$", task_id)[0]
return f"{core}__{max(_find_id_suffixes(dag)) + 1}" |
Generate a wrapper that wraps a function into an Airflow operator.
Can be reused in a single DAG.
:param python_callable: Function to decorate.
:param multiple_outputs: If set to True, the decorated function's return
value will be unrolled to multiple XCom values. Dict will unroll to XCom
values with its keys as XCom keys. If set to False (default), only at
most one XCom value is pushed.
:param decorated_operator_class: The operator that executes the logic needed
to run the python function in the correct environment.
Other kwargs are directly forwarded to the underlying operator class when
it's instantiated. | def task_decorator_factory(
python_callable: Callable | None = None,
*,
multiple_outputs: bool | None = None,
decorated_operator_class: type[BaseOperator],
**kwargs,
) -> TaskDecorator:
"""Generate a wrapper that wraps a function into an Airflow operator.
Can be reused in a single DAG.
:param python_callable: Function to decorate.
:param multiple_outputs: If set to True, the decorated function's return
value will be unrolled to multiple XCom values. Dict will unroll to XCom
values with its keys as XCom keys. If set to False (default), only at
most one XCom value is pushed.
:param decorated_operator_class: The operator that executes the logic needed
to run the python function in the correct environment.
Other kwargs are directly forwarded to the underlying operator class when
it's instantiated.
"""
if multiple_outputs is None:
multiple_outputs = cast(bool, attr.NOTHING)
if python_callable:
decorator = _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast(TaskDecorator, decorator)
elif python_callable is not None:
raise TypeError("No args allowed while using @task, use kwargs instead")
def decorator_factory(python_callable):
return _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast(TaskDecorator, decorator_factory) |
Wrap a function into a BashOperator.
Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used
during type checking or auto-completion.
:param python_callable: Function to decorate.
:meta private: | def bash_task(
python_callable: Callable | None = None,
**kwargs,
) -> TaskDecorator:
"""Wrap a function into a BashOperator.
Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used
during type checking or auto-completion.
:param python_callable: Function to decorate.
:meta private:
"""
return task_decorator_factory(
python_callable=python_callable,
decorated_operator_class=_BashDecoratedOperator,
**kwargs,
) |
Wrap a python function into a BranchExternalPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False. | def branch_external_python_task(
python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs
) -> TaskDecorator:
"""
Wrap a python function into a BranchExternalPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_BranchExternalPythonDecoratedOperator,
**kwargs,
) |
Wrap a python function into a BranchPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False. | def branch_task(
python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs
) -> TaskDecorator:
"""
Wrap a python function into a BranchPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_BranchPythonDecoratedOperator,
**kwargs,
) |
Wrap a python function into a BranchPythonVirtualenvOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False. | def branch_virtualenv_task(
python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs
) -> TaskDecorator:
"""
Wrap a python function into a BranchPythonVirtualenvOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_BranchPythonVirtualenvDecoratedOperator,
**kwargs,
) |
Wrap a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:meta private:
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtualenv that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False. | def external_python_task(
python: str | None = None,
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Wrap a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:meta private:
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtualenv that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python=python,
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonExternalDecoratedOperator,
**kwargs,
) |
Wrap a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False. | def python_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Wrap a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonDecoratedOperator,
**kwargs,
) |
Wrap a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:meta private:
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False. | def virtualenv_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Wrap a callable into an Airflow operator to run via a Python virtual environment.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:meta private:
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonVirtualenvDecoratedOperator,
**kwargs,
) |
Wrap a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate | def sensor_task(python_callable: Callable | None = None, **kwargs) -> TaskDecorator:
"""
Wrap a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=False,
decorated_operator_class=DecoratedSensorOperator,
**kwargs,
) |
Wrap a function into a ShortCircuitOperator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
:meta private: | def short_circuit_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Wrap a function into a ShortCircuitOperator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
This function is only used during type checking or auto-completion.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
:meta private:
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_ShortCircuitDecoratedOperator,
**kwargs,
) |
Python TaskGroup decorator.
This wraps a function into an Airflow TaskGroup. When used as the
``@task_group()`` form, all arguments are forwarded to the underlying
TaskGroup class. Can be used to parametrize TaskGroup.
:param python_callable: Function to decorate.
:param tg_kwargs: Keyword arguments for the TaskGroup object. | def task_group(python_callable=None, **tg_kwargs):
"""Python TaskGroup decorator.
This wraps a function into an Airflow TaskGroup. When used as the
``@task_group()`` form, all arguments are forwarded to the underlying
TaskGroup class. Can be used to parametrize TaskGroup.
:param python_callable: Function to decorate.
:param tg_kwargs: Keyword arguments for the TaskGroup object.
"""
if callable(python_callable) and not tg_kwargs:
return _TaskGroupFactory(function=python_callable, tg_kwargs=tg_kwargs)
return functools.partial(_TaskGroupFactory, tg_kwargs=tg_kwargs) |
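A minimal usage sketch of the decorator above, assuming the Airflow 2.4+ TaskFlow API (`airflow.decorators.dag`, `task`, `task_group`) and `pendulum` are available; the DAG and task names are illustrative:

```python
from __future__ import annotations

import pendulum
from airflow.decorators import dag, task, task_group


@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1, tz="UTC"), catchup=False)
def grouped_pipeline():
    @task
    def extract() -> int:
        return 42

    @task_group
    def transform(value: int):
        # Tasks defined here are grouped under "transform" in the UI.
        @task
        def double(v: int) -> int:
            return v * 2

        return double(value)

    @task
    def load(value: int) -> None:
        print(value)

    load(transform(extract()))


grouped_pipeline()
```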
Determine which empty_task should be run based on whether the execution date minute is even or odd.
:param dict kwargs: Context
:return: Id of the task to run | def should_run(**kwargs) -> str:
"""
Determine which empty_task should be run based on whether the execution date minute is even or odd.
:param dict kwargs: Context
:return: Id of the task to run
"""
print(
f"------------- exec dttm = {kwargs['execution_date']} and minute = {kwargs['execution_date'].minute}"
)
if kwargs["execution_date"].minute % 2 == 0:
return "empty_task_1"
else:
return "empty_task_2" |
DAG to send server IP to email.
:param email: Email to send IP to. Defaults to [email protected]. | def example_dag_decorator(email: str = "[email protected]"):
"""
DAG to send server IP to email.
:param email: Email to send IP to. Defaults to [email protected].
"""
get_ip = GetRequestOperator(task_id="get_ip", url="http://httpbin.org/get")
@task(multiple_outputs=True)
def prepare_email(raw_json: dict[str, Any]) -> dict[str, str]:
external_ip = raw_json["origin"]
return {
"subject": f"Server connected from {external_ip}",
"body": f"Seems like today your server executing Airflow is connected from IP {external_ip}<br>",
}
email_info = prepare_email(get_ip.output)
EmailOperator(
task_id="send_email", to=email, subject=email_info["subject"], html_content=email_info["body"]
) |
Print out the "foo" param passed in via
`airflow tasks test example_passing_params_via_test_command run_this <date>
-t '{"foo":"bar"}'` | def my_py_command(params, test_mode=None, task=None):
"""
Print out the "foo" param passed in via
`airflow tasks test example_passing_params_via_test_command run_this <date>
-t '{"foo":"bar"}'`
"""
if test_mode:
print(
f" 'foo' was passed in via test={test_mode} command : kwargs[params][foo] = {task.params['foo']}"
)
# Print out the value of "miff", passed in below via the Python Operator
print(f" 'miff' was passed in via task params = {params['miff']}")
return 1 |
Print out the "foo" param passed in via
`airflow tasks test example_passing_params_via_test_command env_var_test_task <date>
--env-vars '{"foo":"bar"}'` | def print_env_vars(test_mode=None):
"""
Print out the "foo" param passed in via
`airflow tasks test example_passing_params_via_test_command env_var_test_task <date>
--env-vars '{"foo":"bar"}'`
"""
if test_mode:
print(f"foo={os.environ.get('foo')}")
print(f"AIRFLOW_TEST_MODE={os.environ.get('AIRFLOW_TEST_MODE')}") |
Instantiate a number of operators for the given DAG.
:param str suffix: Suffix to append to the operator task_ids
:param str trigger_rule: TriggerRule for the join task | def create_test_pipeline(suffix, trigger_rule):
"""
Instantiate a number of operators for the given DAG.
:param str suffix: Suffix to append to the operator task_ids
:param str trigger_rule: TriggerRule for the join task
"""
skip_operator = EmptySkipOperator(task_id=f"skip_operator_{suffix}")
always_true = EmptyOperator(task_id=f"always_true_{suffix}")
join = EmptyOperator(task_id=trigger_rule, trigger_rule=trigger_rule)
final = EmptyOperator(task_id=f"final_{suffix}")
skip_operator >> join
always_true >> join
join >> final |
Empty task which is the first task of the DAG | def task_start():
"""Empty task which is the first task of the DAG"""
return "[Task_start]" |
Empty Task1 | def task_1(value: int) -> str:
"""Empty Task1"""
return f"[ Task1 {value} ]" |
Empty Task2 | def task_2(value: str) -> str:
"""Empty Task2"""
return f"[ Task2 {value} ]" |
Empty Task3 | def task_3(value: str) -> None:
"""Empty Task3"""
print(f"[ Task3 {value} ]") |
Empty task which is the last task of the DAG | def task_end() -> None:
"""Empty task which is the last task of the DAG"""
print("[ Task_End ]") |
TaskGroup for grouping related Tasks | def task_group_function(value: int) -> None:
"""TaskGroup for grouping related Tasks"""
task_3(task_2(task_1(value))) |
Print the payload "message" passed to the DagRun conf attribute.
:param dag_run: The DagRun object | def run_this_func(dag_run=None):
"""
Print the payload "message" passed to the DagRun conf attribute.
:param dag_run: The DagRun object
"""
print(f"Remotely received value of {dag_run.conf.get('message')} for key=message") |
Pushes an XCom without a specific target | def push(ti=None):
"""Pushes an XCom without a specific target"""
ti.xcom_push(key="value from pusher 1", value=value_1) |
Pushes an XCom without a specific target, just by returning it | def push_by_returning():
"""Pushes an XCom without a specific target, just by returning it"""
return value_2 |
Pull all previously pushed XComs and check if the pushed values match the pulled values. | def puller(pulled_value_2, ti=None):
"""Pull all previously pushed XComs and check if the pushed values match the pulled values."""
pulled_value_1 = ti.xcom_pull(task_ids="push", key="value from pusher 1")
_compare_values(pulled_value_1, value_1)
_compare_values(pulled_value_2, value_2) |
Return a fixed value | def generate_value():
"""Return a fixed value"""
return "Bring me a shrubbery!" |
Log the given value together with the timestamp | def print_value(value, ts=None):
"""Log the given value together with the timestamp"""
log.info("The knights of Ni say: %s (at %s)", value, ts) |
### Object Storage Tutorial Documentation
This is a tutorial DAG to showcase the usage of the Object Storage API.
Documentation that goes along with the Airflow Object Storage tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial/objectstorage.html) | def tutorial_objectstorage():
"""
### Object Storage Tutorial Documentation
This is a tutorial DAG to showcase the usage of the Object Storage API.
Documentation that goes along with the Airflow Object Storage tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial/objectstorage.html)
"""
# [START get_air_quality_data]
@task
def get_air_quality_data(**kwargs) -> ObjectStoragePath:
"""
#### Get Air Quality Data
This task gets air quality data from the Finnish Meteorological Institute's
open data API. The data is saved as parquet.
"""
import pandas as pd
execution_date = kwargs["logical_date"]
start_time = kwargs["data_interval_start"]
params = {
"format": "json",
"precision": "double",
"groupareas": "0",
"producer": "airquality_urban",
"area": "Uusimaa",
"param": ",".join(aq_fields.keys()),
"starttime": start_time.isoformat(timespec="seconds"),
"endtime": execution_date.isoformat(timespec="seconds"),
"tz": "UTC",
}
response = requests.get(API, params=params)
response.raise_for_status()
# ensure the bucket exists
base.mkdir(exist_ok=True)
formatted_date = execution_date.format("YYYYMMDD")
path = base / f"air_quality_{formatted_date}.parquet"
df = pd.DataFrame(response.json()).astype(aq_fields)
with path.open("wb") as file:
df.to_parquet(file)
return path
# [END get_air_quality_data]
# [START analyze]
@task
def analyze(path: ObjectStoragePath, **kwargs):
"""
#### Analyze
This task analyzes the air quality data, prints the results
"""
import duckdb
conn = duckdb.connect(database=":memory:")
conn.register_filesystem(path.fs)
conn.execute(f"CREATE OR REPLACE TABLE airquality_urban AS SELECT * FROM read_parquet('{path}')")
df2 = conn.execute("SELECT * FROM airquality_urban").fetchdf()
print(df2.head())
# [END analyze]
# [START main_flow]
obj_path = get_air_quality_data()
analyze(obj_path) |
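The tasks above rely on a few module-level names (requests, API, aq_fields, base); a hedged sketch of what they might look like, where the endpoint, field list, bucket, and connection id are assumptions rather than the tutorial's exact values:

import requests

from airflow.io.path import ObjectStoragePath

API = "https://opendata.fmi.fi/timeseries"  # assumed endpoint
aq_fields = {"AQINDEX_PT1H_avg": "float64", "PM10_PT1H_avg": "float64"}  # assumed fields/dtypes
base = ObjectStoragePath("s3://aws_default@airflow-tutorial-data/")  # assumed bucket and conn id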
### TaskFlow API Tutorial Documentation
This is a simple data pipeline example which demonstrates the use of
the TaskFlow API using three simple tasks for Extract, Transform, and Load.
Documentation that goes along with the Airflow TaskFlow API tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html) | def tutorial_taskflow_api():
"""
### TaskFlow API Tutorial Documentation
This is a simple data pipeline example which demonstrates the use of
the TaskFlow API using three simple tasks for Extract, Transform, and Load.
Documentation that goes along with the Airflow TaskFlow API tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)
"""
# [END instantiate_dag]
# [START extract]
@task()
def extract():
"""
#### Extract task
A simple Extract task to get data ready for the rest of the data
pipeline. In this case, getting data is simulated by reading from a
hardcoded JSON string.
"""
data_string = '{"1001": 301.27, "1002": 433.21, "1003": 502.22}'
order_data_dict = json.loads(data_string)
return order_data_dict
# [END extract]
# [START transform]
@task(multiple_outputs=True)
def transform(order_data_dict: dict):
"""
#### Transform task
A simple Transform task which takes in the collection of order data and
computes the total order value.
"""
total_order_value = 0
for value in order_data_dict.values():
total_order_value += value
return {"total_order_value": total_order_value}
# [END transform]
# [START load]
@task()
def load(total_order_value: float):
"""
#### Load task
A simple Load task which takes in the result of the Transform task and
instead of saving it to end user review, just prints it out.
"""
print(f"Total order value is: {total_order_value:.2f}")
# [END load]
# [START main_flow]
order_data = extract()
order_summary = transform(order_data)
load(order_summary["total_order_value"]) |
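Note that the @dag-decorated function above only defines the pipeline; it has to be invoked at module level for the DAG to be registered, typically as a single call:

tutorial_taskflow_api()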
This method is called when task state changes to RUNNING.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the running task instance's dag_run, task, and dag
information. | def on_task_instance_running(previous_state: TaskInstanceState, task_instance: TaskInstance, session):
"""
This method is called when task state changes to RUNNING.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the running task instance's dag_run, task, and dag information.
"""
print("Task instance is in running state")
print(" Previous state of the Task instance:", previous_state)
state: TaskInstanceState = task_instance.state
name: str = task_instance.task_id
start_date = task_instance.start_date
dagrun = task_instance.dag_run
dagrun_status = dagrun.state
task = task_instance.task
if TYPE_CHECKING:
assert task
dag = task.dag
dag_name = None
if dag:
dag_name = dag.dag_id
print(f"Current task name:{name} state:{state} start_date:{start_date}")
print(f"Dag name:{dag_name} and current dag run status:{dagrun_status}") |
This method is called when task state changes to SUCCESS.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the succeeded task instance's dag_run, task, and dag
information. | def on_task_instance_success(previous_state: TaskInstanceState, task_instance: TaskInstance, session):
"""
This method is called when task state changes to SUCCESS.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the succeeded task instance's dag_run, task, and dag information.
"""
print("Task instance in success state")
print(" Previous state of the Task instance:", previous_state)
dag_id = task_instance.dag_id
hostname = task_instance.hostname
operator = task_instance.operator
dagrun = task_instance.dag_run
queued_at = dagrun.queued_at
print(f"Dag name:{dag_id} queued_at:{queued_at}")
print(f"Task hostname:{hostname} operator:{operator}") |
This method is called when task state changes to FAILED.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the failed task instance's dag_run, task, and dag
information. | def on_task_instance_failed(
previous_state: TaskInstanceState, task_instance: TaskInstance, error: None | str | BaseException, session
):
"""
This method is called when task state changes to FAILED.
Through the callback, parameters like the previous task state and the task_instance object can be accessed.
This gives access to the failed task instance's dag_run, task, and dag information.
"""
print("Task instance in failure state")
start_date = task_instance.start_date
end_date = task_instance.end_date
duration = task_instance.duration
dagrun = task_instance.dag_run
task = task_instance.task
if TYPE_CHECKING:
assert task
dag = task.dag
print(f"Task start:{start_date} end:{end_date} duration:{duration}")
print(f"Task:{task} dag:{dag} dagrun:{dagrun}")
if error:
print(f"Failure caused by {error}") |
This method is called when dag run state changes to SUCCESS. | def on_dag_run_success(dag_run: DagRun, msg: str):
"""
This method is called when dag run state changes to SUCCESS.
"""
print("Dag run in success state")
start_date = dag_run.start_date
end_date = dag_run.end_date
print(f"Dag run start:{start_date} end:{end_date}") |
This method is called when dag run state changes to FAILED. | def on_dag_run_failed(dag_run: DagRun, msg: str):
"""
This method is called when dag run state changes to FAILED.
"""
print("Dag run in failure state")
dag_id = dag_run.dag_id
run_id = dag_run.run_id
external_trigger = dag_run.external_trigger
print(f"Dag information:{dag_id} Run id: {run_id} external trigger: {external_trigger}")
print(f"Failed with message: {msg}") |
This method is called when dag run state changes to RUNNING. | def on_dag_run_running(dag_run: DagRun, msg: str):
"""
This method is called when dag run state changes to RUNNING.
"""
print("Dag run in running state")
queued_at = dag_run.queued_at
dag_hash_info = dag_run.dag_hash
print(f"Dag information Queued at: {queued_at} hash info: {dag_hash_info}") |
Generate a DAG to be used as a subdag.
:param str parent_dag_name: Id of the parent DAG
:param str child_dag_name: Id of the child DAG
:param dict args: Default arguments to provide to the subdag
:return: DAG to use as a subdag | def subdag(parent_dag_name, child_dag_name, args) -> DAG:
"""
Generate a DAG to be used as a subdag.
:param str parent_dag_name: Id of the parent DAG
:param str child_dag_name: Id of the child DAG
:param dict args: Default arguments to provide to the subdag
:return: DAG to use as a subdag
"""
dag_subdag = DAG(
dag_id=f"{parent_dag_name}.{child_dag_name}",
default_args=args,
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
catchup=False,
schedule="@daily",
)
for i in range(5):
EmptyOperator(
task_id=f"{child_dag_name}-task-{i + 1}",
default_args=args,
dag=dag_subdag,
)
return dag_subdag |
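A hedged sketch of embedding the generated DAG via SubDagOperator; the parent dag_id and default arguments are assumptions (SubDAGs are deprecated in favour of TaskGroups):

from airflow.operators.subdag import SubDagOperator

# inside a DAG whose dag_id is "example_subdag_operator"
section_1 = SubDagOperator(
    task_id="section-1",
    subdag=subdag("example_subdag_operator", "section-1", {"retries": 1}),
)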
Get a filesystem by scheme.
:param scheme: the scheme to get the filesystem for
:param conn_id: the airflow connection id to use
:param storage_options: the storage options to pass to the filesystem
:return: the filesystem method | def get_fs(
scheme: str, conn_id: str | None = None, storage_options: Properties | None = None
) -> AbstractFileSystem:
"""
Get a filesystem by scheme.
:param scheme: the scheme to get the filesystem for
:param conn_id: the airflow connection id to use
:param storage_options: the storage options to pass to the filesystem
:return: the filesystem method
"""
filesystems = _register_filesystems()
try:
fs = filesystems[scheme]
except KeyError:
raise ValueError(f"No filesystem registered for scheme {scheme}") from None
options = storage_options or {}
# MyPy does not recognize dynamic parameters inspection when we call the method, and we have to do
# it for compatibility reasons with already released providers, that's why we need to ignore
# mypy errors here
parameters = inspect.signature(fs).parameters
if len(parameters) == 1:
if options:
raise AttributeError(
f"Filesystem {scheme} does not support storage options, but options were passed."
f"This most likely means that you are using an old version of the provider that does not "
f"support storage options. Please upgrade the provider if possible."
)
return fs(conn_id) # type: ignore[call-arg]
return fs(conn_id, options) |
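An illustrative usage sketch, assuming an S3-capable filesystem provider is installed; the scheme, connection id, and bucket name are assumptions:

fs = get_fs("s3", conn_id="aws_default")
print(fs.ls("my-bucket"))  # standard fsspec AbstractFileSystem API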
Check if a filesystem is available for a scheme.
:param scheme: the scheme to check
:return: True if a filesystem is available for the scheme | def has_fs(scheme: str) -> bool:
"""
Check if a filesystem is available for a scheme.
:param scheme: the scheme to check
:return: True if a filesystem is available for the scheme
"""
return scheme in _register_filesystems() |
Attach a filesystem or object storage.
:param alias: the alias to be used to refer to the store, autogenerated if omitted
:param protocol: the scheme that is used without ://
:param conn_id: the connection to use to connect to the filesystem
:param encryption_type: the encryption type to use to connect to the filesystem
:param fs: the filesystem type to use to connect to the filesystem | def attach(
protocol: str | None = None,
conn_id: str | None = None,
alias: str | None = None,
encryption_type: str | None = "",
fs: AbstractFileSystem | None = None,
**kwargs,
) -> ObjectStore:
"""
Attach a filesystem or object storage.
:param alias: the alias to be used to refer to the store, autogenerated if omitted
:param protocol: the scheme that is used without ://
:param conn_id: the connection to use to connect to the filesystem
:param encryption_type: the encryption type to use to connect to the filesystem
:param fs: the filesystem instance to use to connect to the filesystem
"""
if alias:
if store := _STORE_CACHE.get(alias):
return store
elif not protocol:
raise ValueError(f"No registered store with alias: {alias}")
if not protocol:
raise ValueError("No protocol specified and no alias provided")
if not alias:
alias = f"{protocol}-{conn_id}" if conn_id else protocol
if store := _STORE_CACHE.get(alias):
return store
_STORE_CACHE[alias] = store = ObjectStore(protocol=protocol, conn_id=conn_id, fs=fs, **kwargs)
return store |
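A short sketch of the caching behaviour; the alias and connection id are assumptions:

store = attach(protocol="s3", conn_id="aws_default", alias="warehouse")
same_store = attach(alias="warehouse")  # resolved from _STORE_CACHE, not re-created
assert store is same_store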
Return the most recent job of this type, if any, based on last heartbeat received.
Jobs in "running" state take precedence over others to make sure alive
job is returned if it is available.
:param job_type: job type to query for to get the most recent job for
:param session: Database session | def most_recent_job(job_type: str, session: Session = NEW_SESSION) -> Job | JobPydantic | None:
"""
Return the most recent job of this type, if any, based on last heartbeat received.
Jobs in "running" state take precedence over others to make sure alive
job is returned if it is available.
:param job_type: job type to query for to get the most recent job for
:param session: Database session
"""
return session.scalar(
select(Job)
.where(Job.job_type == job_type)
.order_by(
# Put "running" jobs at the front.
case({JobState.RUNNING: 0}, value=Job.state, else_=1),
Job.latest_heartbeat.desc(),
)
.limit(1)
) |
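An illustrative health-check sketch, assuming scheduler job records are stored under the job type "SchedulerJob":

job = most_recent_job("SchedulerJob")
if job is not None and job.is_alive():
    print("Scheduler heartbeat looks healthy")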
Run the job.
The Job is always an ORM object and setting the state is happening within the
same DB session and the session is kept open throughout the whole execution.
:meta private: | def run_job(
job: Job, execute_callable: Callable[[], int | None], session: Session = NEW_SESSION
) -> int | None:
"""
Run the job.
The Job is always an ORM object and setting the state is happening within the
same DB session and the session is kept open throughout the whole execution.
:meta private:
"""
job.prepare_for_execution(session=session)
try:
return execute_job(job, execute_callable=execute_callable)
finally:
job.complete_execution(session=session) |
Execute the job.
Job execution requires no session, as executing a job does not generally require an
active database connection. A session might be temporarily acquired and used if the job
runs a heartbeat during execution, but this connection is held only for the duration of the
heartbeat, and in the AIP-44 implementation it happens over the Internal API rather than
directly via the database.
After the job is completed, the state of the Job is updated in the database, which happens
in the "complete_execution" step (which again can be executed locally in case of
database operations, or over the Internal API call).
:param job: Job to execute - it can be either a DB job or its Pydantic-serialized version. It does
not really matter, because apart from running the heartbeat and setting the state,
the runner should not modify the job state.
:param execute_callable: callable to execute when running the job.
:meta private: | def execute_job(job: Job, execute_callable: Callable[[], int | None]) -> int | None:
"""
Execute the job.
Job execution requires no session, as executing a job does not generally require an
active database connection. A session might be temporarily acquired and used if the job
runs a heartbeat during execution, but this connection is held only for the duration of the
heartbeat, and in the AIP-44 implementation it happens over the Internal API rather than
directly via the database.
After the job is completed, the state of the Job is updated in the database, which happens
in the "complete_execution" step (which again can be executed locally in case of
database operations, or over the Internal API call).
:param job: Job to execute - it can be either a DB job or its Pydantic-serialized version. It does
not really matter, because apart from running the heartbeat and setting the state,
the runner should not modify the job state.
:param execute_callable: callable to execute when running the job.
:meta private:
"""
ret = None
try:
ret = execute_callable()
# In case of max runs or max duration
job.state = JobState.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
job.state = JobState.SUCCESS
except Exception:
job.state = JobState.FAILED
raise
return ret |
Perform a heartbeat for the Job passed to it, optionally checking if it is necessary.
:param job: job to perform heartbeat for
:param heartbeat_callback: callback to run by the heartbeat
:param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for
triggerer for example) | def perform_heartbeat(
job: Job, heartbeat_callback: Callable[[Session], None], only_if_necessary: bool
) -> None:
"""
Perform a heartbeat for the Job passed to it, optionally checking if it is necessary.
:param job: job to perform heartbeat for
:param heartbeat_callback: callback to run by the heartbeat
:param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for
triggerer for example)
"""
seconds_remaining: float = 0.0
if job.latest_heartbeat and job.heartrate:
seconds_remaining = job.heartrate - (timezone.utcnow() - job.latest_heartbeat).total_seconds()
if seconds_remaining > 0 and only_if_necessary:
return
job.heartbeat(heartbeat_callback=heartbeat_callback) |
Whether this is a parent process.
Return True if the current process is the parent process.
False if the current process is a child process started by multiprocessing. | def _is_parent_process() -> bool:
"""
Whether this is a parent process.
Return True if the current process is the parent process.
False if the current process is a child process started by multiprocessing.
"""
return multiprocessing.current_process().name == "MainProcess" |
Configure logging where each trigger logs to its own file and can be exposed via the airflow webserver.
Generally speaking, we take the log handler configured for logger ``airflow.task``,
wrap it with TriggererHandlerWrapper, and set it as the handler for the root logger.
If there already is a handler configured for the root logger and it supports triggers, we wrap it instead.
:meta private: | def configure_trigger_log_handler():
"""
Configure logging where each trigger logs to its own file and can be exposed via the airflow webserver.
Generally speaking, we take the log handler configured for logger ``airflow.task``,
wrap it with TriggererHandlerWrapper, and set it as the handler for the root logger.
If there already is a handler configured for the root logger and it supports triggers, we wrap it instead.
:meta private:
"""
global HANDLER_SUPPORTS_TRIGGERER
def should_wrap(handler):
return handler.__dict__.get("trigger_should_wrap", False) or handler.__class__.__dict__.get(
"trigger_should_wrap", False
)
def should_queue(handler):
return handler.__dict__.get("trigger_should_queue", True) or handler.__class__.__dict__.get(
"trigger_should_queue", True
)
def send_trigger_end_marker(handler):
val = handler.__dict__.get("trigger_send_end_marker", None)
if val is not None:
return val
val = handler.__class__.__dict__.get("trigger_send_end_marker", None)
if val is not None:
return val
return True
def supports_triggerer(handler):
return (
should_wrap(handler)
or handler.__dict__.get("trigger_supported", False)
or handler.__class__.__dict__.get("trigger_supported", False)
)
def get_task_handler_from_logger(logger_):
for h in logger_.handlers:
if isinstance(h, FileTaskHandler) and not supports_triggerer(h):
warnings.warn(
f"Handler {h.__class__.__name__} does not support "
"individual trigger logging. Please check the release notes "
"for your provider to see if a newer version supports "
"individual trigger logging.",
category=UserWarning,
stacklevel=3,
)
if supports_triggerer(h):
return h
def find_suitable_task_handler():
# check root logger then check airflow.task to see if a handler
# suitable for use with TriggerHandlerWrapper (has trigger_should_wrap
# attr, likely inherits from FileTaskHandler)
h = get_task_handler_from_logger(root_logger)
if not h:
# try to use handler configured from airflow task
logger.debug("No task logger configured for root logger; trying `airflow.task`.")
h = get_task_handler_from_logger(logging.getLogger("airflow.task"))
if h:
logger.debug("Using logging configuration from `airflow.task`")
if not h:
warnings.warn(
"Could not find log handler suitable for individual trigger logging.",
category=UserWarning,
stacklevel=3,
)
return None
return h
def filter_trigger_logs_from_other_root_handlers(new_hdlr):
# we add context vars to log records emitted for individual triggerer logging
# we want these records to be processed by our special trigger handler wrapper
# but not by any other handlers, so we filter out these messages from
# other handlers by adding DropTriggerLogsFilter
# we could consider only adding this filter to the default console logger
# so as to leave other custom handlers alone
for h in root_logger.handlers:
if h is not new_hdlr:
h.addFilter(DropTriggerLogsFilter())
def add_handler_wrapper_to_root(base_handler):
# first make sure we remove from root logger if it happens to be there
# it could have come from root or airflow.task, but we only need
# to make sure we remove from root, since messages will not flow
# through airflow.task
if base_handler in root_logger.handlers:
root_logger.removeHandler(base_handler)
logger.info("Setting up TriggererHandlerWrapper with handler %s", base_handler)
h = TriggererHandlerWrapper(base_handler=base_handler, level=base_handler.level)
# just extra cautious, checking if user manually configured it there
if h not in root_logger.handlers:
root_logger.addHandler(h)
return h
root_logger = logging.getLogger()
task_handler = find_suitable_task_handler()
if not task_handler:
return None
if TYPE_CHECKING:
assert isinstance(task_handler, FileTaskHandler)
if should_wrap(task_handler):
trigger_handler = add_handler_wrapper_to_root(task_handler)
else:
trigger_handler = copy(task_handler)
root_logger.addHandler(trigger_handler)
filter_trigger_logs_from_other_root_handlers(trigger_handler)
if send_trigger_end_marker(trigger_handler) is False:
global SEND_TRIGGER_END_MARKER
SEND_TRIGGER_END_MARKER = False
HANDLER_SUPPORTS_TRIGGERER = True
return should_queue(trigger_handler) |
Route log messages to a queue and process them with QueueListener.
Airflow task handlers make blocking I/O calls.
We replace trigger log handlers with a LocalQueueHandler,
which sends log records to a queue.
Then we start a QueueListener in a thread, which is configured
to consume the queue and pass the records to the handlers as
originally configured. This keeps the handler I/O out of the
async event loop.
:meta private: | def setup_queue_listener():
"""
Route log messages to a queue and process them with QueueListener.
Airflow task handlers make blocking I/O calls.
We replace trigger log handlers with a LocalQueueHandler,
which sends log records to a queue.
Then we start a QueueListener in a thread, which is configured
to consume the queue and pass the records to the handlers as
originally configured. This keeps the handler I/O out of the
async event loop.
:meta private:
"""
queue = SimpleQueue()
root_logger = logging.getLogger()
handlers: list[logging.Handler] = []
queue_handler = LocalQueueHandler(queue)
queue_handler.addFilter(TriggerMetadataFilter())
root_logger.addHandler(queue_handler)
for h in root_logger.handlers[:]:
if h is not queue_handler and "pytest" not in h.__module__:
root_logger.removeHandler(h)
handlers.append(h)
this_logger = logging.getLogger(__name__)
if handlers:
this_logger.info("Setting up logging queue listener with handlers %s", handlers)
listener = logging.handlers.QueueListener(queue, *handlers, respect_handler_level=True)
listener.start()
return listener
else:
this_logger.warning("Unable to set up individual trigger logging")
return None |
Attach additional specs to an existing pod object.
:param pod: A pod to attach a list of Kubernetes objects to
:param k8s_objects: a potentially ``None`` list of K8SModels
:return: pod with the objects attached if they exist | def append_to_pod(pod: k8s.V1Pod, k8s_objects: list[K8SModel] | None):
"""
Attach additional specs to an existing pod object.
:param pod: A pod to attach a list of Kubernetes objects to
:param k8s_objects: a potentially ``None`` list of K8SModels
:return: pod with the objects attached if they exist
"""
if not k8s_objects:
return pod
return reduce(lambda p, o: o.attach_to_pod(p), k8s_objects, pod) |
Enable TCP keepalive mechanism.
This prevents urllib3 connections from hanging indefinitely when an idle connection
is timed out by services like cloud load balancers or firewalls.
See https://github.com/apache/airflow/pull/11406 for detailed explanation.
Please ping @michalmisiewicz or @dimberman in the PR if you want to modify this function. | def _enable_tcp_keepalive() -> None:
"""
Enable TCP keepalive mechanism.
This prevents urllib3 connections from hanging indefinitely when an idle connection
is timed out by services like cloud load balancers or firewalls.
See https://github.com/apache/airflow/pull/11406 for detailed explanation.
Please ping @michalmisiewicz or @dimberman in the PR if you want to modify this function.
"""
import socket
from urllib3.connection import HTTPConnection, HTTPSConnection
tcp_keep_idle = conf.getint("kubernetes_executor", "tcp_keep_idle")
tcp_keep_intvl = conf.getint("kubernetes_executor", "tcp_keep_intvl")
tcp_keep_cnt = conf.getint("kubernetes_executor", "tcp_keep_cnt")
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
if hasattr(socket, "TCP_KEEPIDLE"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, tcp_keep_idle))
else:
log.debug("Unable to set TCP_KEEPIDLE on this platform")
if hasattr(socket, "TCP_KEEPINTVL"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, tcp_keep_intvl))
else:
log.debug("Unable to set TCP_KEEPINTVL on this platform")
if hasattr(socket, "TCP_KEEPCNT"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPCNT, tcp_keep_cnt))
else:
log.debug("Unable to set TCP_KEEPCNT on this platform")
HTTPSConnection.default_socket_options = HTTPSConnection.default_socket_options + socket_options
HTTPConnection.default_socket_options = HTTPConnection.default_socket_options + socket_options |
Retrieve Kubernetes client.
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
:param config_file: configuration file
:return: kubernetes client
:rtype: client.CoreV1Api | def get_kube_client(
in_cluster: bool = conf.getboolean("kubernetes_executor", "in_cluster"),
cluster_context: str | None = None,
config_file: str | None = None,
) -> client.CoreV1Api:
"""
Retrieve Kubernetes client.
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
:param config_file: configuration file
:return: kubernetes client
:rtype: client.CoreV1Api
"""
if not has_kubernetes:
raise _import_err
if conf.getboolean("kubernetes_executor", "enable_tcp_keepalive"):
_enable_tcp_keepalive()
configuration = _get_default_configuration()
api_client_retry_configuration = conf.getjson(
"kubernetes_executor", "api_client_retry_configuration", fallback={}
)
if not conf.getboolean("kubernetes_executor", "verify_ssl"):
_disable_verify_ssl()
if isinstance(api_client_retry_configuration, dict):
configuration.retries = urllib3.util.Retry(**api_client_retry_configuration)
else:
raise ValueError("api_client_retry_configuration should be a dictionary")
if in_cluster:
config.load_incluster_config(client_configuration=configuration)
else:
if cluster_context is None:
cluster_context = conf.get("kubernetes_executor", "cluster_context", fallback=None)
if config_file is None:
config_file = conf.get("kubernetes_executor", "config_file", fallback=None)
config.load_kube_config(
config_file=config_file, context=cluster_context, client_configuration=configuration
)
if not conf.getboolean("kubernetes_executor", "verify_ssl"):
configuration.verify_ssl = False
ssl_ca_cert = conf.get("kubernetes_executor", "ssl_ca_cert")
if ssl_ca_cert:
configuration.ssl_ca_cert = ssl_ca_cert
api_client = client.ApiClient(configuration=configuration)
return client.CoreV1Api(api_client) |
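A hedged usage sketch; the namespace is an assumption:

core_v1 = get_kube_client()
for pod in core_v1.list_namespaced_pod(namespace="airflow").items:
    print(pod.metadata.name)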
Generate random lowercase alphanumeric string of length num.
:meta private: | def rand_str(num):
"""Generate random lowercase alphanumeric string of length num.
:meta private:
"""
return "".join(secrets.choice(alphanum_lower) for _ in range(num)) |
Add random string to pod name while staying under max length.
:param pod_name: name of the pod
:param rand_len: length of the random string to append
:param max_len: maximum length of the pod name
:meta private: | def add_pod_suffix(pod_name: str, rand_len: int = 8, max_len: int = 80) -> str:
"""Add random string to pod name while staying under max length.
:param pod_name: name of the pod
:param rand_len: length of the random string to append
:param max_len: maximum length of the pod name
:meta private:
"""
suffix = "-" + rand_str(rand_len)
return pod_name[: max_len - len(suffix)].strip("-.") + suffix |
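Illustrative behaviour of the helper (the exact random suffix will differ):

print(add_pod_suffix("my-pod"))                    # e.g. "my-pod-x4k9q2mz"
print(len(add_pod_suffix("x" * 100, max_len=80)))  # 80: name truncated before the suffix is appended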