response | instruction |
---|---|
Checks the heartbeat logic | def test_is_alive():
"""Checks the heartbeat logic"""
# Current time
triggerer_job = Job(heartrate=10, state=State.RUNNING)
assert triggerer_job.is_alive()
# Slightly old, but still fresh
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
assert triggerer_job.is_alive()
# Old enough to fail
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
assert not triggerer_job.is_alive()
# Completed state should not be alive
triggerer_job.state = State.SUCCESS
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
assert not triggerer_job.is_alive(), "Completed jobs even with recent heartbeat should not be alive" |
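A minimal sketch of the liveness rule the test above exercises, assuming a grace multiplier of 2.1 over the heartrate (which matches the 20s-alive / 31s-dead expectations); the function and parameter names are hypothetical and not Airflow's actual implementation.

```python
import datetime


def is_alive_sketch(state: str, latest_heartbeat: datetime.datetime,
                    heartrate: float, grace_multiplier: float = 2.1) -> bool:
    """Hypothetical heartbeat check: only running jobs with a recent heartbeat count as alive."""
    if state != "running":
        # Completed jobs are never alive, even with a recent heartbeat.
        return False
    age = (datetime.datetime.utcnow() - latest_heartbeat).total_seconds()
    # With heartrate=10 and grace_multiplier=2.1 the cutoff is 21s: 20s passes, 31s fails.
    return age < heartrate * grace_multiplier
```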
Checks the triggerer-is-needed logic | def test_is_needed(session):
"""Checks the triggerer-is-needed logic"""
# No triggers, no need
triggerer_job = Job(heartrate=10, state=State.RUNNING)
triggerer_job_runner = TriggererJobRunner(triggerer_job)
assert triggerer_job_runner.is_needed() is False
# Add a trigger, it's needed
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
assert triggerer_job_runner.is_needed() is True |
Tests that TriggererJob correctly sets capacity to a valid value passed in as a CLI arg,
handles invalid args, or sets it to a default value if no arg is passed. | def test_capacity_decode():
"""
Tests that TriggererJob correctly sets capacity to a valid value passed in as a CLI arg,
handles invalid args, or sets it to a default value if no arg is passed.
"""
# Positive cases
variants = [
42,
None,
]
for input_str in variants:
job = Job()
job_runner = TriggererJobRunner(job, capacity=input_str)
assert job_runner.capacity == input_str or job_runner.capacity == 1000
# Negative cases
variants = [
"NAN",
0.5,
-42,
4 / 2, # Resolves to a float, in addition to being just plain weird
]
for input_str in variants:
job = Job()
with pytest.raises(ValueError):
TriggererJobRunner(job=job, capacity=input_str) |
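A hedged sketch of the validation behaviour the test expects: a positive integer is accepted as-is, None falls back to the default of 1000, and anything else raises ValueError. The helper name is hypothetical.

```python
def decode_capacity_sketch(capacity, default: int = 1000) -> int:
    """Hypothetical capacity validation mirroring the positive/negative cases above."""
    if capacity is None:
        return default
    # bool is a subclass of int, so exclude it explicitly; floats and strings are rejected too.
    if isinstance(capacity, int) and not isinstance(capacity, bool) and capacity > 0:
        return capacity
    raise ValueError(f"Capacity number {capacity} is invalid")
```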
Checks that the triggerer will correctly see a new Trigger in the database
and send it to the trigger runner, and then delete it when it vanishes. | def test_trigger_lifecycle(session):
"""
Checks that the triggerer will correctly see a new Trigger in the database
and send it to the trigger runner, and then delete it when it vanishes.
"""
# Use a trigger that will not fire for the lifetime of the test
# (we want to avoid it firing and deleting itself)
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
dag_model, run, trigger_orm, task_instance = create_trigger_in_db(session, trigger)
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job_runner.trigger_runner.to_create] == [1]
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job_runner.daemon = True
job_runner.trigger_runner.start()
try:
# Wait for up to 3 seconds for it to appear in the TriggerRunner's storage
for _ in range(30):
if job_runner.trigger_runner.triggers:
assert list(job_runner.trigger_runner.triggers.keys()) == [1]
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never created trigger")
# OK, now remove it from the DB
session.delete(trigger_orm)
session.commit()
# Re-load the triggers
job_runner.load_triggers()
# Wait for up to 3 seconds for it to vanish from the TriggerRunner's storage
for _ in range(30):
if not job_runner.trigger_runner.triggers:
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never deleted trigger")
finally:
# We always have to stop the runner
job_runner.trigger_runner.stop = True
job_runner.trigger_runner.join(30) |
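The lifecycle test above (and several tests below) polls shared state in a bounded for/else loop; a small, hypothetical helper that captures the same pattern could look like this.

```python
import time


def wait_for(predicate, timeout: float = 3.0, interval: float = 0.1) -> bool:
    """Poll ``predicate`` every ``interval`` seconds until it is truthy or ``timeout`` elapses."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False
```

With such a helper, the explicit loops would reduce to something like `if not wait_for(lambda: job_runner.trigger_runner.triggers): pytest.fail("TriggerRunner never created trigger")`.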
This verifies the resolution of the race condition documented in GitHub issue #18392.
Triggers are queued for creation by TriggerJob.load_triggers.
There was a race condition where multiple triggers would be created unnecessarily.
What happens is the runner completes the trigger and purges it from the "running" list.
Then job.load_triggers is called, and it looks like the trigger is not running but should be,
so it queues it again.
The scenario is as follows:
1. job.load_triggers (trigger now queued)
2. runner.create_triggers (trigger now running)
3. job.handle_events (trigger still appears running so state not updated in DB)
4. runner.cleanup_finished_triggers (trigger completed at this point; trigger removed from "running" set)
5. job.load_triggers (trigger not running, but also not purged from DB, so it is queued again)
6. runner.create_triggers (trigger created again)
This test verifies that under this scenario only one trigger is created. | def test_trigger_create_race_condition_18392(session, tmp_path):
"""
This verifies the resolution of the race condition documented in GitHub issue #18392.
Triggers are queued for creation by TriggerJob.load_triggers.
There was a race condition where multiple triggers would be created unnecessarily.
What happens is the runner completes the trigger and purges it from the "running" list.
Then job.load_triggers is called, and it looks like the trigger is not running but should be,
so it queues it again.
The scenario is as follows:
1. job.load_triggers (trigger now queued)
2. runner.create_triggers (trigger now running)
3. job.handle_events (trigger still appears running so state not updated in DB)
4. runner.cleanup_finished_triggers (trigger completed at this point; trigger removed from "running" set)
5. job.load_triggers (trigger not running, but also not purged from DB, so it is queued again)
6. runner.create_triggers (trigger created again)
This test verifies that under this scenario only one trigger is created.
"""
path = tmp_path / "test_trigger_bad_respawn.txt"
class TriggerRunner_(TriggerRunner):
"""We do some waiting for main thread looping"""
async def wait_for_job_method_count(self, method, count):
for _ in range(30):
await asyncio.sleep(0.1)
if getattr(self, f"{method}_count", 0) >= count:
break
else:
pytest.fail(f"did not observe count {count} in job method {method}")
async def create_triggers(self):
"""
On first run, wait for job.load_triggers to make sure they are queued
"""
if getattr(self, "loop_count", 0) == 0:
await self.wait_for_job_method_count("load_triggers", 1)
await super().create_triggers()
self.loop_count = getattr(self, "loop_count", 0) + 1
async def cleanup_finished_triggers(self):
"""On loop 1, make sure that job.handle_events was already called"""
if self.loop_count == 1:
await self.wait_for_job_method_count("handle_events", 1)
await super().cleanup_finished_triggers()
class TriggererJob_(TriggererJobRunner):
"""We do some waiting for runner thread looping (and track calls in job thread)"""
def wait_for_runner_loop(self, runner_loop_count):
for _ in range(30):
time.sleep(0.1)
if getattr(self.trigger_runner, "call_count", 0) >= runner_loop_count:
break
else:
pytest.fail("did not observe 2 loops in the runner thread")
def load_triggers(self):
"""On second run, make sure that runner has called create_triggers in its second loop"""
super().load_triggers()
self.trigger_runner.load_triggers_count = (
getattr(self.trigger_runner, "load_triggers_count", 0) + 1
)
if self.trigger_runner.load_triggers_count == 2:
self.wait_for_runner_loop(runner_loop_count=2)
def handle_events(self):
super().handle_events()
self.trigger_runner.handle_events_count = (
getattr(self.trigger_runner, "handle_events_count", 0) + 1
)
trigger = TimeDeltaTrigger_(delta=datetime.timedelta(microseconds=1), filename=path.as_posix())
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
dag = DagModel(dag_id="test-dag")
dag_run = DagRun(dag.dag_id, run_id="abc", run_type="none")
ti = TaskInstance(PythonOperator(task_id="dummy-task", python_callable=print), run_id=dag_run.run_id)
ti.dag_id = dag.dag_id
ti.trigger_id = 1
session.add(dag)
session.add(dag_run)
session.add(ti)
session.commit()
job = Job()
job_runner = TriggererJob_(job)
job_runner.trigger_runner = TriggerRunner_()
thread = Thread(target=job_runner._execute)
thread.start()
try:
for _ in range(40):
time.sleep(0.1)
# ready to evaluate after 2 loops
if getattr(job_runner.trigger_runner, "loop_count", 0) >= 2:
break
else:
pytest.fail("did not observe 2 loops in the runner thread")
finally:
job_runner.trigger_runner.stop = True
job_runner.trigger_runner.join(30)
thread.join()
instances = path.read_text().splitlines()
assert len(instances) == 1 |
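To make the six-step scenario concrete, here is an illustrative guard (not Airflow's actual fix) for the queuing step: a trigger is only queued when it is neither running, already queued, nor finished but still awaiting event handling, which is exactly the window that steps 3-5 describe. All names are hypothetical.

```python
def triggers_to_queue_sketch(db_trigger_ids, running_ids, queued_ids, pending_event_ids):
    """Illustrative selection of trigger ids that are safe to queue for creation."""
    unavailable = set(running_ids) | set(queued_ids) | set(pending_event_ids)
    return [trigger_id for trigger_id in db_trigger_ids if trigger_id not in unavailable]
```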
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that does not exist. | def test_trigger_from_dead_triggerer(session, create_task_instance):
"""
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that does not exist.
"""
# Use a trigger that has an invalid triggerer_id
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
trigger_orm.triggerer_id = 999 # Non-existent triggerer
session.add(trigger_orm)
ti_orm = create_task_instance(
task_id="ti_orm",
execution_date=timezone.utcnow(),
run_id="orm_run_id",
)
ti_orm.trigger_id = trigger_orm.id
session.add(trigger_orm)
session.commit()
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job_runner.trigger_runner.to_create] == [1] |
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that has an expired heartbeat. | def test_trigger_from_expired_triggerer(session, create_task_instance):
"""
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that has an expired heartbeat.
"""
# Use a trigger assigned to the expired triggerer
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
trigger_orm.triggerer_id = 42
session.add(trigger_orm)
ti_orm = create_task_instance(
task_id="ti_orm",
execution_date=timezone.utcnow(),
run_id="orm_run_id",
)
ti_orm.trigger_id = trigger_orm.id
session.add(trigger_orm)
# Use a TriggererJobRunner with an expired heartbeat
triggerer_job_orm = Job(TriggererJobRunner.job_type)
triggerer_job_orm.id = 42
triggerer_job_orm.start_date = timezone.utcnow() - datetime.timedelta(hours=1)
triggerer_job_orm.end_date = None
triggerer_job_orm.latest_heartbeat = timezone.utcnow() - datetime.timedelta(hours=1)
session.add(triggerer_job_orm)
session.commit()
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job(TriggererJobRunner.job_type)
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job_runner.trigger_runner.to_create] == [1] |
Checks that if an exception occurs when creating triggers, the triggerer
process stops | def test_trigger_runner_exception_stops_triggerer(session):
"""
Checks that if an exception occurs when creating triggers, the triggerer
process stops
"""
class MockTriggerException(Exception):
pass
class TriggerRunner_(TriggerRunner):
async def create_triggers(self):
raise MockTriggerException("Trigger creation failed")
# Use a trigger that will immediately succeed
trigger = SuccessTrigger()
create_trigger_in_db(session, trigger)
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.trigger_runner = TriggerRunner_()
thread = Thread(target=job_runner._execute)
thread.start()
# Wait 4 seconds for the triggerer to stop
try:
for _ in range(40):
time.sleep(0.1)
if not thread.is_alive():
break
else:
pytest.fail("TriggererJobRunner did not stop after exception in TriggerRunner")
if not job_runner.trigger_runner.stop:
pytest.fail("TriggerRunner not marked as stopped after exception in TriggerRunner")
finally:
job_runner.trigger_runner.stop = True
# with suppress(MockTriggerException):
job_runner.trigger_runner.join(30)
thread.join() |
Checks that when a trigger fires, it correctly makes it into the
event queue. | def test_trigger_firing(session):
"""
Checks that when a trigger fires, it correctly makes it into the
event queue.
"""
# Use a trigger that will immediately succeed
trigger = SuccessTrigger()
create_trigger_in_db(session, trigger)
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job_runner.daemon = True
job_runner.trigger_runner.start()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
if job_runner.trigger_runner.events:
assert list(job_runner.trigger_runner.events) == [(1, TriggerEvent(True))]
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never sent the trigger event out")
finally:
# We always have to stop the runner
job_runner.trigger_runner.stop = True
job_runner.trigger_runner.join(30) |
Checks that when a trigger fails, it correctly makes it into the
failure queue. | def test_trigger_failing(session):
"""
Checks that when a trigger fails, it correctly makes it into the
failure queue.
"""
# Use a trigger that will immediately fail
trigger = FailureTrigger()
create_trigger_in_db(session, trigger)
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job_runner.daemon = True
job_runner.trigger_runner.start()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
if job_runner.trigger_runner.failed_triggers:
assert len(job_runner.trigger_runner.failed_triggers) == 1
trigger_id, exc = next(iter(job_runner.trigger_runner.failed_triggers))
assert trigger_id == 1
assert isinstance(exc, ValueError)
assert exc.args[0] == "Deliberate trigger failure"
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never marked the trigger as failed")
finally:
# We always have to stop the runner
job_runner.trigger_runner.stop = True
job_runner.trigger_runner.join(30) |
Checks that the triggerer will correctly clean up triggers that do not
have any task instances depending on them. | def test_trigger_cleanup(session):
"""
Checks that the triggerer will correctly clean up triggers that do not
have any task instances depending on them.
"""
# Use a trigger that will not fire for the lifetime of the test
# (we want to avoid it firing and deleting itself)
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Trigger the cleanup code
Trigger.clean_unused(session=session)
session.commit()
# Make sure it's gone
assert session.query(Trigger).count() == 0 |
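A hedged sketch of the kind of cleanup Trigger.clean_unused performs: delete trigger rows that no task instance references. The models are passed in as parameters because the exact query in Airflow may differ; this only shows the shape of the idea.

```python
from sqlalchemy import delete, select


def clean_unused_triggers_sketch(session, trigger_model, task_instance_model):
    """Delete triggers whose id is not referenced by any task instance's trigger_id."""
    referenced = select(task_instance_model.trigger_id).where(
        task_instance_model.trigger_id.is_not(None)
    )
    session.execute(delete(trigger_model).where(trigger_model.id.not_in(referenced)))
    session.commit()
```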
Checks that the triggerer will correctly fail task instances that depend on
triggers that can't even be loaded. | def test_invalid_trigger(session, dag_maker):
"""
Checks that the triggerer will correctly fail task instances that depend on
triggers that can't even be loaded.
"""
# Create a totally invalid trigger
trigger_orm = Trigger(classpath="fake.classpath", kwargs={})
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Create the test DAG and task
with dag_maker(dag_id="test_invalid_trigger", session=session):
EmptyOperator(task_id="dummy1")
dr = dag_maker.create_dagrun()
task_instance = dr.task_instances[0]
# Make a task instance based on that and tie it to the trigger
task_instance.state = TaskInstanceState.DEFERRED
task_instance.trigger_id = 1
session.commit()
# Make a TriggererJobRunner and have it retrieve DB tasks
job = Job()
job_runner = TriggererJobRunner(job)
job_runner.load_triggers()
# Make sure it turned up in the failed queue
assert len(job_runner.trigger_runner.failed_triggers) == 1
# Run the failed trigger handler
job_runner.handle_failed_triggers()
# Make sure it marked the task instance as failed (which is actually the
# scheduled state with a payload to make it fail)
task_instance.refresh_from_db()
assert task_instance.state == TaskInstanceState.SCHEDULED
assert task_instance.next_method == "__fail__"
assert task_instance.next_kwargs["error"] == "Trigger failure"
assert task_instance.next_kwargs["traceback"][-1] == "ModuleNotFoundError: No module named 'fake'\n" |
When the listener setup function is called, root handlers should be moved to the queue listener
and replaced with a QueueHandler. | def test_queue_listener():
"""
When the listener setup function is called, root handlers should be moved to the queue listener
and replaced with a QueueHandler.
"""
reset_logging()
importlib.reload(airflow_local_settings)
configure_logging()
def non_pytest_handlers(val):
return [h for h in val if "pytest" not in h.__module__]
import logging
log = logging.getLogger()
handlers = non_pytest_handlers(log.handlers)
assert len(handlers) == 1
handler = handlers[0]
assert handler.__class__ == RedirectStdHandler
listener = setup_queue_listener()
assert handler not in non_pytest_handlers(log.handlers)
qh = log.handlers[-1]
assert qh.__class__ == LocalQueueHandler
assert qh.queue == listener.queue
listener.stop() |
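The assertions above describe the standard queue-handler hand-off; the plain-stdlib equivalent of that setup, independent of Airflow's LocalQueueHandler, looks roughly like this.

```python
import logging
import queue
from logging.handlers import QueueHandler, QueueListener


def setup_queue_listener_sketch() -> QueueListener:
    """Move existing root handlers behind a queue so they run in a listener thread."""
    log_queue = queue.SimpleQueue()
    root = logging.getLogger()
    moved = list(root.handlers)        # these handlers will now be driven by the listener
    for handler in moved:
        root.removeHandler(handler)
    root.addHandler(QueueHandler(log_queue))
    listener = QueueListener(log_queue, *moved, respect_handler_level=True)
    listener.start()
    return listener
```

As in the test, the queue held by the returned listener is the same object the QueueHandler writes to, and `listener.stop()` shuts the hand-off down.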
root logger: RedirectStdHandler
task: FTH
result: wrap | def test_configure_trigger_log_handler_file():
"""
root logger: RedirectStdHandler
task: FTH
result: wrap
"""
# reset logging
root_logger = logging.getLogger()
clear_logger_handlers(root_logger)
configure_logging()
# before config
assert_handlers(root_logger, RedirectStdHandler)
# default task logger
task_logger = logging.getLogger("airflow.task")
task_handlers = assert_handlers(task_logger, FileTaskHandler)
# not yet configured to use wrapper
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
triggerer_job_runner.configure_trigger_log_handler()
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
root_handlers = assert_handlers(root_logger, RedirectStdHandler, TriggererHandlerWrapper)
assert root_handlers[1].base_handler == task_handlers[0]
# other handlers have DropTriggerLogsFilter
assert root_handlers[0].filters[1].__class__ == DropTriggerLogsFilter
# no filters on wrapper handler
assert root_handlers[1].filters == []
# wrapper handler uses handler from airflow.task
assert root_handlers[1].base_handler.__class__ == FileTaskHandler |
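The wiring asserted above can be summarised with a plain-logging sketch: reuse the airflow.task handler for trigger records on the root logger, and filter trigger records out of the other root handlers. The filter class and record flag below are hypothetical stand-ins for DropTriggerLogsFilter and TriggererHandlerWrapper, not Airflow's implementation.

```python
import logging


class DropMarkedRecordsFilter(logging.Filter):
    """Hypothetical stand-in: drop records flagged as individual-trigger logs."""

    def filter(self, record: logging.LogRecord) -> bool:
        return not getattr(record, "is_trigger_log", False)


def wire_trigger_logging_sketch() -> None:
    """Rough shape of the configuration the tests above assert."""
    root = logging.getLogger()
    task_handler = logging.getLogger("airflow.task").handlers[0]
    for handler in root.handlers:
        # existing root handlers should not emit per-trigger records
        handler.addFilter(DropMarkedRecordsFilter())
    # Airflow adds a wrapper around this handler (with no filters); here the task
    # handler is added directly, purely for illustration.
    root.addHandler(task_handler)
```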
root logger: RedirectStdHandler
task: S3TH
result: wrap | def test_configure_trigger_log_handler_s3():
"""
root logger: RedirectStdHandler
task: S3TH
result: wrap
"""
with conf_vars(
{
("logging", "remote_logging"): "True",
("logging", "remote_log_conn_id"): "some_aws",
("logging", "remote_base_log_folder"): "s3://some-folder",
}
):
importlib.reload(airflow_local_settings)
configure_logging()
# before config
root_logger = logging.getLogger()
assert_handlers(root_logger, RedirectStdHandler)
# default task logger
task_logger = logging.getLogger("airflow.task")
task_handlers = assert_handlers(task_logger, S3TaskHandler)
# not yet configured to use wrapper
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
triggerer_job_runner.configure_trigger_log_handler()
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
handlers = assert_handlers(root_logger, RedirectStdHandler, TriggererHandlerWrapper)
assert handlers[1].base_handler == task_handlers[0]
# other handlers have DropTriggerLogsFilter
assert handlers[0].filters[1].__class__ == DropTriggerLogsFilter
# no filters on wrapper handler
assert handlers[1].filters == []
# wrapper handler uses handler from airflow.task
assert handlers[1].base_handler.__class__ == S3TaskHandler |
No root handler configured.
When a handler other than FileTaskHandler is configured, don't modify.
When an incompatible subclass of FileTaskHandler is configured, don't modify. | def test_configure_trigger_log_handler_not_file_task_handler(cfg, cls, msg):
"""
No root handler configured.
When a handler other than FileTaskHandler is configured, don't modify.
When an incompatible subclass of FileTaskHandler is configured, don't modify.
"""
# reset handlers
root_logger = logging.getLogger()
clear_logger_handlers(root_logger)
with conf_vars(
{
(
"logging",
"logging_config_class",
): f"tests.jobs.test_triggerer_job_logging.{cfg}",
}
):
importlib.reload(airflow_local_settings)
configure_logging()
# no root handlers
assert_handlers(root_logger)
# default task logger
task_logger = logging.getLogger("airflow.task")
assert_handlers(task_logger, cls)
# not yet configured to use wrapper
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
with warnings.catch_warnings(record=True) as captured:
triggerer_job_runner.configure_trigger_log_handler()
assert [x.message.args[0] for x in captured] == msg
# after config
# doesn't use TriggererHandlerWrapper, no change in handler
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
# still no root handlers
assert_handlers(root_logger) |
root: no handler
task: FTH
result: wrap | def test_configure_trigger_log_handler_fallback_task():
"""
root: no handler
task: FTH
result: wrap
"""
with conf_vars(
{
("logging", "logging_config_class"): "tests.jobs.test_triggerer_job_logging.fallback_task",
}
):
importlib.reload(airflow_local_settings)
configure_logging()
# check custom config used
task_logger = logging.getLogger("airflow.task")
assert_handlers(task_logger, S3TaskHandler)
# before config
root_logger = logging.getLogger()
assert_handlers(root_logger)
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
triggerer_job_runner.configure_trigger_log_handler()
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
handlers = assert_handlers(root_logger, TriggererHandlerWrapper)
assert handlers[0].base_handler == task_logger.handlers[0]
# no filters on wrapper handler
assert handlers[0].filters == [] |
root logger: single handler that supports triggerer
result: wrap | def test_configure_trigger_log_handler_root_has_task_handler():
"""
root logger: single handler that supports triggerer
result: wrap
"""
with conf_vars(
{
(
"logging",
"logging_config_class",
): "tests.jobs.test_triggerer_job_logging.root_has_task_handler",
}
):
configure_logging()
# check custom config used
task_logger = logging.getLogger("airflow.task")
assert_handlers(task_logger, logging.Handler)
# before config
root_logger = logging.getLogger()
assert_handlers(root_logger, FileTaskHandler)
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
# configure
triggerer_job_runner.configure_trigger_log_handler()
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
handlers = assert_handlers(root_logger, TriggererHandlerWrapper)
# no filters on wrapper handler
assert handlers[0].filters == []
# wrapper handler uses handler from airflow.task
assert handlers[0].base_handler.__class__ == FileTaskHandler |
root: A handler that doesn't support trigger or inherit FileTaskHandler
task: Supports triggerer
Result:
* wrap and use the task logger handler
* other root handlers filter trigger logging | def test_configure_trigger_log_handler_root_not_file_task():
"""
root: A handler that doesn't support trigger or inherit FileTaskHandler
task: Supports triggerer
Result:
* wrap and use the task logger handler
* other root handlers filter trigger logging
"""
with conf_vars(
{
(
"logging",
"logging_config_class",
): "tests.jobs.test_triggerer_job_logging.root_not_file_task",
}
):
configure_logging()
# check custom config used
task_logger = logging.getLogger("airflow.task")
assert_handlers(task_logger, S3TaskHandler)
# before config
root_logger = logging.getLogger()
assert_handlers(root_logger, logging.Handler)
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
# configure
with warnings.catch_warnings(record=True) as captured:
triggerer_job_runner.configure_trigger_log_handler()
assert captured == []
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
handlers = assert_handlers(root_logger, logging.Handler, TriggererHandlerWrapper)
# other handlers have DropTriggerLogsFilter
assert handlers[0].filters[0].__class__ == DropTriggerLogsFilter
# no filters on wrapper handler
assert handlers[1].filters == []
# wrapper handler uses handler from airflow.task
assert handlers[1].base_handler.__class__ == S3TaskHandler |
Root logger handler: An older subclass of FileTaskHandler that doesn't support triggerer
Task logger handler: Supports triggerer
Result:
* wrap and use the task logger handler
* other root handlers filter trigger logging | def test_configure_trigger_log_handler_root_old_file_task():
"""
Root logger handler: An older subclass of FileTaskHandler that doesn't support triggerer
Task logger handler: Supports triggerer
Result:
* wrap and use the task logger handler
* other root handlers filter trigger logging
"""
with conf_vars(
{
(
"logging",
"logging_config_class",
): "tests.jobs.test_triggerer_job_logging.root_logger_old_file_task",
}
):
configure_logging()
# check custom config used
assert_handlers(logging.getLogger("airflow.task"), S3TaskHandler)
# before config
root_logger = logging.getLogger()
assert_handlers(root_logger, OldFileTaskHandler)
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is False
with warnings.catch_warnings(record=True) as captured:
triggerer_job_runner.configure_trigger_log_handler()
# since a root logger is explicitly configured with an old FileTaskHandler which doesn't
# work properly with individual trigger logging, warn
# todo: we should probably just remove the handler in this case it's basically misconfiguration
assert [x.message.args[0] for x in captured] == [
"Handler OldFileTaskHandler does not support individual trigger logging. "
"Please check the release notes for your provider to see if a newer version "
"supports individual trigger logging.",
]
# after config
assert triggerer_job_runner.HANDLER_SUPPORTS_TRIGGERER is True
handlers = assert_handlers(root_logger, OldFileTaskHandler, TriggererHandlerWrapper)
# other handlers have DropTriggerLogsFilter
assert handlers[0].filters[0].__class__ == DropTriggerLogsFilter
# no filters on wrapper handler
assert handlers[1].filters == []
# wrapper handler uses handler from airflow.task
assert handlers[1].base_handler.__class__ == S3TaskHandler |
Verify that operator attributes are correctly templated. | def test_render_template_fields_logging(
caplog, monkeypatch, task, context, expected_exception, expected_rendering, expected_log, not_expected_log
):
"""Verify if operator attributes are correctly templated."""
# Trigger templating and verify results
def _do_render():
task.render_template_fields(context=context)
logger = logging.getLogger("airflow.task")
monkeypatch.setattr(logger, "propagate", True)
if expected_exception:
with pytest.raises(expected_exception):
_do_render()
else:
_do_render()
for k, v in expected_rendering.items():
assert getattr(task, k) == v
if expected_log:
assert expected_log in caplog.text
if not_expected_log:
assert not_expected_log not in caplog.text |
For a given dag run, get a dict of states.
Example::
{
"my_setup": "success",
"my_teardown": {0: "success", 1: "success", 2: "success"},
"my_work": "failed",
} | def get_states(dr):
"""
For a given dag run, get a dict of states.
Example::
{
"my_setup": "success",
"my_teardown": {0: "success", 1: "success", 2: "success"},
"my_work": "failed",
}
"""
ti_dict = defaultdict(dict)
for ti in dr.get_task_instances():
if ti.map_index == -1:
ti_dict[ti.task_id] = ti.state
else:
ti_dict[ti.task_id][ti.map_index] = ti.state
return dict(ti_dict) |
When fail_stop is enabled, teardowns should run according to their setups.
In this case, the second teardown skips because its setup skips. | def test_teardown_and_fail_stop(dag_maker):
"""
When fail_stop is enabled, teardowns should run according to their setups.
In this case, the second teardown skips because its setup skips.
"""
with dag_maker(fail_stop=True) as dag:
for num in (1, 2):
with TaskGroup(f"tg_{num}"):
@task_decorator
def my_setup():
print("setting up multiple things")
return [1, 2, 3]
@task_decorator
def my_work(val):
print(f"doing work with multiple things: {val}")
raise ValueError("this fails")
return val
@task_decorator
def my_teardown():
print("teardown")
s = my_setup()
t = my_teardown().as_teardown(setups=s)
with t:
my_work(s)
tg1, tg2 = dag.task_group.children.values()
tg1 >> tg2
dr = dag.test()
states = get_states(dr)
expected = {
"tg_1.my_setup": "success",
"tg_1.my_teardown": "success",
"tg_1.my_work": "failed",
"tg_2.my_setup": "skipped",
"tg_2.my_teardown": "skipped",
"tg_2.my_work": "skipped",
}
assert states == expected |
Test that set_task_instance_state updates the TaskInstance state and clears downstream failed tasks | def test_set_task_instance_state(run_id, execution_date, session, dag_maker):
"""Test that set_task_instance_state updates the TaskInstance state and clears downstream failed tasks"""
start_date = datetime_tz(2020, 1, 1)
with dag_maker("test_set_task_instance_state", start_date=start_date, session=session) as dag:
task_1 = EmptyOperator(task_id="task_1")
task_2 = EmptyOperator(task_id="task_2")
task_3 = EmptyOperator(task_id="task_3")
task_4 = EmptyOperator(task_id="task_4")
task_5 = EmptyOperator(task_id="task_5")
task_1 >> [task_2, task_3, task_4, task_5]
dagrun = dag_maker.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.FAILED,
run_type=DagRunType.SCHEDULED,
)
def get_ti_from_db(task):
return (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == task.task_id,
TI.run_id == dagrun.run_id,
)
.one()
)
get_ti_from_db(task_1).state = State.FAILED
get_ti_from_db(task_2).state = State.SUCCESS
get_ti_from_db(task_3).state = State.UPSTREAM_FAILED
get_ti_from_db(task_4).state = State.FAILED
get_ti_from_db(task_5).state = State.SKIPPED
session.flush()
altered = dag.set_task_instance_state(
task_id=task_1.task_id,
run_id=run_id,
execution_date=execution_date,
state=State.SUCCESS,
session=session,
)
# After _mark_task_instance_state, task_1 is marked as SUCCESS
ti1 = get_ti_from_db(task_1)
assert ti1.state == State.SUCCESS
# TIs should have DagRun pre-loaded
assert isinstance(inspect(ti1).attrs.dag_run.loaded_value, DagRun)
# task_2 remains as SUCCESS
assert get_ti_from_db(task_2).state == State.SUCCESS
# task_3 and task_4 are cleared because they were in FAILED/UPSTREAM_FAILED state
assert get_ti_from_db(task_3).state == State.NONE
assert get_ti_from_db(task_4).state == State.NONE
# task_5 remains as SKIPPED
assert get_ti_from_db(task_5).state == State.SKIPPED
dagrun.refresh_from_db(session=session)
# dagrun should be set to QUEUED
assert dagrun.get_state() == State.QUEUED
assert {t.key for t in altered} == {("test_set_task_instance_state", "task_1", dagrun.run_id, 1, -1)} |
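An illustrative summary of the clearing rule the assertions above check: downstream tasks in FAILED or UPSTREAM_FAILED are reset to no state, while SUCCESS and SKIPPED downstream tasks keep theirs. The helper is hypothetical and is not the set_task_instance_state implementation.

```python
def clear_downstream_failed_sketch(downstream_states: dict) -> dict:
    """Map downstream task states to their post-clear values."""
    cleared_states = {"failed", "upstream_failed"}
    return {
        task_id: (None if state in cleared_states else state)
        for task_id, state in downstream_states.items()
    }
```

For the DAG above this yields `{"task_2": "success", "task_3": None, "task_4": None, "task_5": "skipped"}`, matching the assertions.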
Test that when setting an individual mapped TI, the other TIs are not affected | def test_set_task_instance_state_mapped(dag_maker, session):
"""Test that when setting an individual mapped TI, the other TIs are not affected"""
task_id = "t1"
with dag_maker(session=session) as dag:
@dag.task
def make_arg_lists():
return [[1], [2], [{"a": "b"}]]
def consumer(value):
print(value)
mapped = PythonOperator.partial(task_id=task_id, dag=dag, python_callable=consumer).expand(
op_args=make_arg_lists()
)
mapped >> BaseOperator(task_id="downstream")
dr1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=DagRunState.FAILED,
)
expand_mapped_task(mapped, dr1.run_id, "make_arg_lists", length=2, session=session)
# set_state(future=True) only applies to scheduled runs
dr2 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=DagRunState.FAILED,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
)
expand_mapped_task(mapped, dr2.run_id, "make_arg_lists", length=2, session=session)
session.query(TI).filter_by(dag_id=dag.dag_id).update({"state": TaskInstanceState.FAILED})
ti_query = (
session.query(TI.task_id, TI.map_index, TI.run_id, TI.state)
.filter(TI.dag_id == dag.dag_id, TI.task_id.in_([task_id, "downstream"]))
.order_by(TI.run_id, TI.task_id, TI.map_index)
)
# Check pre-conditions
assert ti_query.all() == [
("downstream", -1, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 0, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr1.run_id, TaskInstanceState.FAILED),
("downstream", -1, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 0, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr2.run_id, TaskInstanceState.FAILED),
]
dag.set_task_instance_state(
task_id=task_id,
map_indexes=[1],
future=True,
run_id=dr1.run_id,
state=TaskInstanceState.SUCCESS,
session=session,
)
assert dr1 in session, "Check session is passed down all the way"
assert ti_query.all() == [
("downstream", -1, dr1.run_id, None),
(task_id, 0, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr1.run_id, TaskInstanceState.SUCCESS),
("downstream", -1, dr2.run_id, None),
(task_id, 0, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr2.run_id, TaskInstanceState.SUCCESS),
] |
Test that set_task_group_state updates the TaskGroup state and clears downstream failed tasks | def test_set_task_group_state(run_id, execution_date, session, dag_maker):
"""Test that set_task_group_state updates the TaskGroup state and clears downstream failed tasks"""
start_date = datetime_tz(2020, 1, 1)
with dag_maker("test_set_task_group_state", start_date=start_date, session=session) as dag:
start = EmptyOperator(task_id="start")
with TaskGroup("section_1", tooltip="Tasks for section_1") as section_1:
task_1 = EmptyOperator(task_id="task_1")
task_2 = EmptyOperator(task_id="task_2")
task_3 = EmptyOperator(task_id="task_3")
task_1 >> [task_2, task_3]
task_4 = EmptyOperator(task_id="task_4")
task_5 = EmptyOperator(task_id="task_5")
task_6 = EmptyOperator(task_id="task_6")
task_7 = EmptyOperator(task_id="task_7")
task_8 = EmptyOperator(task_id="task_8")
start >> section_1 >> [task_4, task_5, task_6, task_7, task_8]
dagrun = dag_maker.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.FAILED,
run_type=DagRunType.SCHEDULED,
)
def get_ti_from_db(task):
return (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == task.task_id,
TI.run_id == dagrun.run_id,
)
.one()
)
get_ti_from_db(task_1).state = State.FAILED
get_ti_from_db(task_2).state = State.SUCCESS
get_ti_from_db(task_3).state = State.UPSTREAM_FAILED
get_ti_from_db(task_4).state = State.SUCCESS
get_ti_from_db(task_5).state = State.UPSTREAM_FAILED
get_ti_from_db(task_6).state = State.FAILED
get_ti_from_db(task_7).state = State.SKIPPED
session.flush()
altered = dag.set_task_group_state(
group_id=section_1.group_id,
run_id=run_id,
execution_date=execution_date,
state=State.SUCCESS,
session=session,
)
# After _mark_task_instance_state, task_1 is marked as SUCCESS
assert get_ti_from_db(task_1).state == State.SUCCESS
# task_2 remains as SUCCESS
assert get_ti_from_db(task_2).state == State.SUCCESS
# task_3 should be marked as SUCCESS
assert get_ti_from_db(task_3).state == State.SUCCESS
# task_4 should remain as SUCCESS
assert get_ti_from_db(task_4).state == State.SUCCESS
# task_5 and task_6 are cleared because they were in FAILED/UPSTREAM_FAILED state
assert get_ti_from_db(task_5).state == State.NONE
assert get_ti_from_db(task_6).state == State.NONE
# task_7 remains as SKIPPED
assert get_ti_from_db(task_7).state == State.SKIPPED
dagrun.refresh_from_db(session=session)
# dagrun should be set to QUEUED
assert dagrun.get_state() == State.QUEUED
assert {t.key for t in altered} == {
("test_set_task_group_state", "section_1.task_1", dagrun.run_id, 1, -1),
("test_set_task_group_state", "section_1.task_3", dagrun.run_id, 1, -1),
} |
Loads DAGs from a module for test. | def make_example_dags(module):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module.__path__[0])
return dagbag.dags |
Test that tasks with specific dates are only created for backfill runs | def test_verify_integrity_task_start_and_end_date(Stats_incr, session, run_type, expected_tis):
"""Test that tasks with specific dates are only created for backfill runs"""
with DAG("test", start_date=DEFAULT_DATE) as dag:
EmptyOperator(task_id="without")
EmptyOperator(task_id="with_start_date", start_date=DEFAULT_DATE + datetime.timedelta(1))
EmptyOperator(task_id="with_end_date", end_date=DEFAULT_DATE - datetime.timedelta(1))
dag_run = DagRun(
dag_id=dag.dag_id,
run_type=run_type,
execution_date=DEFAULT_DATE,
run_id=DagRun.generate_run_id(run_type, DEFAULT_DATE),
)
dag_run.dag = dag
session.add(dag_run)
session.flush()
dag_run.verify_integrity(session=session)
tis = dag_run.task_instances
assert len(tis) == expected_tis
Stats_incr.assert_any_call(
"task_instance_created_EmptyOperator", expected_tis, tags={"dag_id": "test", "run_type": run_type}
)
Stats_incr.assert_any_call(
"task_instance_created",
expected_tis,
tags={"dag_id": "test", "run_type": run_type, "task_type": "EmptyOperator"},
) |
Test that when the length of a mapped literal changes we remove extra TIs | def test_mapped_literal_verify_integrity(dag_maker, session):
"""Test that when the length of a mapped literal changes we remove extra TIs"""
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=[1, 2, 3, 4])
dr = dag_maker.create_dagrun()
# Now "change" the DAG and we should see verify_integrity REMOVE some TIs
dag._remove_task("task_2")
with dag:
mapped = task_2.expand(arg2=[1, 2]).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
dr.verify_integrity()
indices = (
session.query(TI.map_index, TI.state)
.filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)
.order_by(TI.map_index)
.all()
)
assert indices == [(0, None), (1, None), (2, TaskInstanceState.REMOVED), (3, TaskInstanceState.REMOVED)] |
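The next several tests all exercise the same reconciliation rule for mapped task instances; an illustrative, hypothetical helper expressing it: indices beyond the new mapping length are marked REMOVED, indices within it are kept, and indices not yet present are created.

```python
def reconcile_map_indexes_sketch(existing_indexes, new_length):
    """Split existing map indexes into kept / to-remove, and compute missing ones to create."""
    existing = set(existing_indexes)
    keep = sorted(i for i in existing if i < new_length)
    mark_removed = sorted(i for i in existing if i >= new_length)
    create = sorted(i for i in range(new_length) if i not in existing)
    return keep, mark_removed, create
```

For the case above, `reconcile_map_indexes_sketch([0, 1, 2, 3], 2)` gives `([0, 1], [2, 3], [])`, matching the REMOVED states asserted.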
Test that when we change from a literal to an XComArg, the TIs are removed | def test_mapped_literal_to_xcom_arg_verify_integrity(dag_maker, session):
"""Test that when we change from a literal to an XComArg, the TIs are removed"""
with dag_maker(session=session) as dag:
t1 = BaseOperator(task_id="task_1")
@task
def task_2(arg2): ...
task_2.expand(arg2=[1, 2, 3, 4])
dr = dag_maker.create_dagrun()
# Now "change" the DAG and we should see verify_integrity REMOVE some TIs
dag._remove_task("task_2")
with dag:
mapped = task_2.expand(arg2=t1.output).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
dr.verify_integrity()
indices = (
session.query(TI.map_index, TI.state)
.filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)
.order_by(TI.map_index)
.all()
)
assert indices == [
(0, TaskInstanceState.REMOVED),
(1, TaskInstanceState.REMOVED),
(2, TaskInstanceState.REMOVED),
(3, TaskInstanceState.REMOVED),
] |
Test that when the length of a mapped literal increases, an additional TI is added | def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
"""Test that when the length of a mapped literal increases, an additional TI is added"""
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=[1, 2, 3, 4])
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, State.NONE),
]
# Now "increase" the length of literal
dag._remove_task("task_2")
with dag:
task_2.expand(arg2=[1, 2, 3, 4, 5]).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Every mapped task is revised at task_instance_scheduling_decision
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, State.NONE),
(4, State.NONE),
] |
Test that when the length of a mapped literal is reduced, the extra TIs are marked as removed | def test_mapped_literal_length_reduction_adds_removed_state(dag_maker, session):
"""Test that when the length of a mapped literal is reduced, the extra TIs are marked as removed"""
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=[1, 2, 3, 4])
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, State.NONE),
]
# Now "reduce" the length of literal
dag._remove_task("task_2")
with dag:
task_2.expand(arg2=[1, 2]).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Since we change the literal on the dag file itself, the dag_hash will
# change which will have the scheduler verify the dr integrity
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.REMOVED),
(3, State.REMOVED),
] |
Test that when the length of a mapped literal increases at runtime, an additional TI is added | def test_mapped_length_increase_at_runtime_adds_additional_tis(dag_maker, session):
"""Test that when the length of a mapped literal increases at runtime, an additional TI is added"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and "increase" the length of literal
dag.clear()
Variable.set(key="arg1", value=[1, 2, 3, 4])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
# this would be called by the localtask job
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, State.NONE),
] |
Test that when the length of mapped literal reduces at runtime, the missing task instances
are marked as removed | def test_mapped_literal_length_reduction_at_runtime_adds_removed_state(dag_maker, session):
"""
Test that when the length of mapped literal reduces at runtime, the missing task instances
are marked as removed
"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and "reduce" the length of literal
dag.clear()
Variable.set(key="arg1", value=[1, 2])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
# this would be called by the localtask job
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, TaskInstanceState.REMOVED),
] |
This test tries to recreate a faulty state in the database and checks if we can recover from it.
The faulty state is that both mapped task instances and the unmapped task instance exist.
So we have instances with map_index [-1, 0, 1]. The -1 task instance should be removed in this case. | def test_mapped_literal_faulty_state_in_db(dag_maker, session):
"""
This test tries to recreate a faulty state in the database and checks if we can recover from it.
The faulty state is that both mapped task instances and the unmapped task instance exist.
So we have instances with map_index [-1, 0, 1]. The -1 task instance should be removed in this case.
"""
with dag_maker(session=session) as dag:
@task
def task_1():
return [1, 2]
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
decision = dr.task_instance_scheduling_decisions()
assert len(decision.schedulable_tis) == 2
# We insert a faulty record
session.add(TaskInstance(dag.get_task("task_2"), dr.execution_date, dr.run_id))
session.flush()
decision = dr.task_instance_scheduling_decisions()
assert len(decision.schedulable_tis) == 2 |
Test that when there's no change to mapped task indexes at runtime, the dagrun.verify_integrity
is not called | def test_mapped_literal_length_with_no_change_at_runtime_doesnt_call_verify_integrity(dag_maker, session):
"""
Test that when there's no change to mapped task indexes at runtime, the dagrun.verify_integrity
is not called
"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and no change to length
dag.clear()
Variable.set(key="arg1", value=[1, 2, 3])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
# this would be called by the localtask job
# Verify that DagRun.verify_integrity is not called
with mock.patch("airflow.models.dagrun.DagRun.verify_integrity") as mock_verify_integrity:
dr.task_instance_scheduling_decisions()
mock_verify_integrity.assert_not_called() |
Test increase in mapped task at runtime with calls to dagrun.verify_integrity | def test_calls_to_verify_integrity_with_mapped_task_increase_at_runtime(dag_maker, session):
"""
Test increase in mapped task at runtime with calls to dagrun.verify_integrity
"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and "increase" the length of literal
dag.clear()
Variable.set(key="arg1", value=[1, 2, 3, 4, 5])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
task2 = dag.get_task("task_2")
for ti in dr.get_task_instances():
if ti.map_index < 0:
ti.task = task1
else:
ti.task = task2
session.merge(ti)
session.flush()
# create the additional task
dr.task_instance_scheduling_decisions()
# Run verify_integrity as a whole and assert new tasks were added
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, State.NONE),
(4, State.NONE),
]
ti3 = dr.get_task_instance(task_id="task_2", map_index=3)
ti3.task = task2
ti3.state = TaskInstanceState.FAILED
session.merge(ti3)
session.flush()
# assert repeated calls did not change the instances
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
(3, TaskInstanceState.FAILED),
(4, State.NONE),
] |
Test reduction in mapped task at runtime with calls to dagrun.verify_integrity | def test_calls_to_verify_integrity_with_mapped_task_reduction_at_runtime(dag_maker, session):
"""
Test reduction in mapped task at runtime with calls to dagrun.verify_integrity
"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and "reduce" the length of literal
dag.clear()
Variable.set(key="arg1", value=[1])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
task2 = dag.get_task("task_2")
for ti in dr.get_task_instances():
if ti.map_index < 0:
ti.task = task1
else:
ti.task = task2
ti.state = TaskInstanceState.SUCCESS
session.merge(ti)
session.flush()
# Run verify_integrity as a whole and assert some tasks were removed
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, TaskInstanceState.SUCCESS),
(1, TaskInstanceState.REMOVED),
(2, TaskInstanceState.REMOVED),
]
# assert repeated calls did not change the instances
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, TaskInstanceState.SUCCESS),
(1, TaskInstanceState.REMOVED),
(2, TaskInstanceState.REMOVED),
] |
Test no change in mapped task at runtime with calls to dagrun.verify_integrity | def test_calls_to_verify_integrity_with_mapped_task_with_no_changes_at_runtime(dag_maker, session):
"""
Test no change in mapped task at runtime with calls to dagrun.verify_integrity
"""
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
# Now "clear" and return the same length
dag.clear()
Variable.set(key="arg1", value=[1, 2, 3])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
task2 = dag.get_task("task_2")
for ti in dr.get_task_instances():
if ti.map_index < 0:
ti.task = task1
else:
ti.task = task2
ti.state = TaskInstanceState.SUCCESS
session.merge(ti)
session.flush()
# Run verify_integrity as a whole and assert no changes
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, TaskInstanceState.SUCCESS),
(1, TaskInstanceState.SUCCESS),
(2, TaskInstanceState.SUCCESS),
]
# assert repeated calls did not change the instances
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, TaskInstanceState.SUCCESS),
(1, TaskInstanceState.SUCCESS),
(2, TaskInstanceState.SUCCESS),
] |
Test zero length reduction in mapped task at runtime with calls to dagrun.verify_integrity | def test_calls_to_verify_integrity_with_mapped_task_zero_length_at_runtime(dag_maker, session, caplog):
"""
Test zero length reduction in mapped task at runtime with calls to dagrun.verify_integrity
"""
import logging
from airflow.models import Variable
Variable.set(key="arg1", value=[1, 2, 3])
@task
def task_1():
return Variable.get("arg1", deserialize_json=True)
with dag_maker(session=session) as dag:
@task
def task_2(arg2): ...
task_2.expand(arg2=task_1())
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id="task_1")
ti.run()
dr.task_instance_scheduling_decisions()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, State.NONE),
(1, State.NONE),
(2, State.NONE),
]
ti1 = next(i for i in tis if i.map_index == 0)
# Now "clear" and "reduce" the length to empty list
dag.clear()
Variable.set(key="arg1", value=[])
with dag:
task_2.expand(arg2=task_1()).operator
# At this point, we need to test that the change works on the serialized
# DAG (which is what the scheduler operates on)
serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
dr.dag = serialized_dag
# Run the first task again to get the new lengths
ti = dr.get_task_instance(task_id="task_1")
task1 = dag.get_task("task_1")
ti.refresh_from_task(task1)
ti.run()
task2 = dag.get_task("task_2")
for ti in dr.get_task_instances():
if ti.map_index < 0:
ti.task = task1
else:
ti.task = task2
session.merge(ti)
session.flush()
with caplog.at_level(logging.DEBUG):
# Run verify_integrity as a whole and assert the tasks were removed
dr.verify_integrity()
tis = dr.get_task_instances()
indices = [(ti.map_index, ti.state) for ti in tis if ti.map_index >= 0]
assert sorted(indices) == [
(0, TaskInstanceState.REMOVED),
(1, TaskInstanceState.REMOVED),
(2, TaskInstanceState.REMOVED),
]
assert (
f"Removing task '{ti1}' as the map_index is longer than the resolved mapping list (0)"
in caplog.text
) |
Test that dynamic task inside a dynamic task group only marks
the corresponding downstream EmptyOperator as success. | def test_mapped_task_group_empty_operator(dag_maker, session):
"""
Test that dynamic task inside a dynamic task group only marks
the corresponding downstream EmptyOperator as success.
"""
literal = [1, 2, 3]
with dag_maker(session=session) as dag:
@task_group
def tg(x):
@task
def t1(x):
return x
t2 = EmptyOperator(task_id="t2")
@task
def t3(x):
return x
t1(x) >> t2 >> t3(x)
tg.expand(x=literal)
dr = dag_maker.create_dagrun()
t2_task = dag.get_task("tg.t2")
t2_0 = dr.get_task_instance(task_id="tg.t2", map_index=0)
t2_0.refresh_from_task(t2_task)
assert t2_0.state is None
t2_1 = dr.get_task_instance(task_id="tg.t2", map_index=1)
t2_1.refresh_from_task(t2_task)
assert t2_1.state is None
dr.schedule_tis([t2_0])
t2_0 = dr.get_task_instance(task_id="tg.t2", map_index=0)
assert t2_0.state == TaskInstanceState.SUCCESS
t2_1 = dr.get_task_instance(task_id="tg.t2", map_index=1)
assert t2_1.state is None |
Test that an operator with _start_trigger and _next_method set can be directly
deferred during scheduling. | def test_schedule_tis_start_trigger(dag_maker, session):
"""
Test that an operator with _start_trigger and _next_method set can be directly
deferred during scheduling.
"""
trigger = SuccessTrigger()
class TestOperator(BaseOperator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_trigger = trigger
self.next_method = "execute_complete"
def execute_complete(self):
pass
with dag_maker(session=session):
task = TestOperator(task_id="test_task")
dr: DagRun = dag_maker.create_dagrun()
ti = TI(task=task, run_id=dr.run_id, state=None)
assert ti.state is None
dr.schedule_tis((ti,), session=session)
assert ti.state == TaskInstanceState.DEFERRED |
Test that clearing a task and moving from non-mapped to mapped clears existing
references in XCom, TaskFail, TaskInstanceNote, TaskReschedule and
RenderedTaskInstanceFields. To be able to test this, RenderedTaskInstanceFields
was not used in the test since it would require that the task is expanded first. | def test_clearing_task_and_moving_from_non_mapped_to_mapped(dag_maker, session):
"""
Test that clearing a task and moving from non-mapped to mapped clears existing
references in XCom, TaskFail, TaskInstanceNote, TaskReschedule and
RenderedTaskInstanceFields. To be able to test this, RenderedTaskInstanceFields
was not used in the test since it would require that the task is expanded first.
"""
from airflow.models.taskfail import TaskFail
from airflow.models.xcom import XCom
@task
def printx(x):
print(x)
with dag_maker() as dag:
printx.expand(x=[1])
dr1: DagRun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti = dr1.get_task_instances()[0]
filter_kwargs = dict(dag_id=ti.dag_id, task_id=ti.task_id, run_id=ti.run_id, map_index=ti.map_index)
ti = session.query(TaskInstance).filter_by(**filter_kwargs).one()
tr = TaskReschedule(
task_id=ti.task_id,
dag_id=ti.dag_id,
run_id=ti.run_id,
try_number=ti.try_number,
start_date=timezone.datetime(2017, 1, 1),
end_date=timezone.datetime(2017, 1, 2),
reschedule_date=timezone.datetime(2017, 1, 1),
)
# mimicking a case where task moved from non-mapped to mapped
# in that case, it would have map_index of -1 even though mapped
ti.map_index = -1
ti.note = "sample note"
session.merge(ti)
session.flush()
# Purposely omitted RenderedTaskInstanceFields because the ti need
# to be expanded but here we are mimicking and made it map_index -1
session.add(tr)
session.add(TaskFail(ti))
XCom.set(key="test", value="value", task_id=ti.task_id, dag_id=dag.dag_id, run_id=ti.run_id)
session.commit()
for table in [TaskFail, TaskInstanceNote, TaskReschedule, XCom]:
assert session.query(table).count() == 1
dr1.task_instance_scheduling_decisions(session)
for table in [TaskFail, TaskInstanceNote, TaskReschedule, XCom]:
assert session.query(table).count() == 0 |
We use a convenience notation to wire up test scenarios:
t<num> -- teardown task
t<num>_ -- teardown task with on_failure_fail_dagrun = True
s<num> -- setup task
w<num> -- work task (a.k.a. normal task)
In the test input, each line is a statement. We'll automatically create the tasks and wire them up
as indicated in the test input. | def test_tis_considered_for_state(dag_maker, session, input, expected):
"""
We use a convenience notation to wire up test scenarios:
t<num> -- teardown task
t<num>_ -- teardown task with on_failure_fail_dagrun = True
s<num> -- setup task
w<num> -- work task (a.k.a. normal task)
In the test input, each line is a statement. We'll automatically create the tasks and wire them up
as indicated in the test input.
"""
@teardown
def teardown_task():
print(1)
@task
def work_task():
print(1)
@setup
def setup_task():
print(1)
def make_task(task_id, dag):
"""
Task factory helper.
Will give a setup, teardown, work, or teardown-with-dagrun-failure task depending on input.
"""
if task_id.startswith("s"):
factory = setup_task
elif task_id.startswith("w"):
factory = work_task
elif task_id.endswith("_"):
factory = teardown_task.override(on_failure_fail_dagrun=True)
else:
factory = teardown_task
return dag.task_dict.get(task_id) or factory.override(task_id=task_id)()
with dag_maker() as dag:
for line in input:
tasks = [make_task(x, dag) for x in line.split(" >> ")]
reduce(lambda x, y: x >> y, tasks)
dr = dag_maker.create_dagrun()
tis = dr.task_instance_scheduling_decisions(session).tis
tis_for_state = {x.task_id for x in dr._tis_for_dagrun_state(dag=dag, tis=tis)}
assert tis_for_state == expected |
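# Illustration only (hypothetical input, not part of the actual parametrization):
# under the convenience notation above, "s1" becomes a setup task, "w1"/"w2"
# become work tasks, and "t1_" becomes a teardown task with
# on_failure_fail_dagrun=True; each statement is wired left to right.
example_input = [
    "s1 >> w1 >> t1_",
    "s1 >> w2 >> t1_",
]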
Test that dependencies are correct when mapping with an XComArg | def test_map_xcom_arg():
"""Test that dependencies are correct when mapping with an XComArg"""
with DAG("test-dag", start_date=DEFAULT_DATE):
task1 = BaseOperator(task_id="op1")
mapped = MockOperator.partial(task_id="task_2").expand(arg2=task1.output)
finish = MockOperator(task_id="finish")
mapped >> finish
assert task1.downstream_list == [mapped] |
Test that the correct number of downstream tasks are generated when mapping with an XComArg | def test_map_xcom_arg_multiple_upstream_xcoms(dag_maker, session):
"""Test that the correct number of downstream tasks are generated when mapping with an XComArg"""
class PushExtraXComOperator(BaseOperator):
"""Push an extra XCom value along with the default return value."""
def __init__(self, return_value, **kwargs):
super().__init__(**kwargs)
self.return_value = return_value
def execute(self, context):
context["task_instance"].xcom_push(key="extra_key", value="extra_value")
return self.return_value
with dag_maker("test-dag", session=session, start_date=DEFAULT_DATE) as dag:
upstream_return = [1, 2, 3]
task1 = PushExtraXComOperator(return_value=upstream_return, task_id="task_1")
task2 = PushExtraXComOperator.partial(task_id="task_2").expand(return_value=task1.output)
task3 = PushExtraXComOperator.partial(task_id="task_3").expand(return_value=task2.output)
dr = dag_maker.create_dagrun()
ti_1 = dr.get_task_instance("task_1", session)
ti_1.run()
ti_2s, _ = task2.expand_mapped_task(dr.run_id, session=session)
for ti in ti_2s:
ti.refresh_from_task(dag.get_task("task_2"))
ti.run()
ti_3s, _ = task3.expand_mapped_task(dr.run_id, session=session)
for ti in ti_3s:
ti.refresh_from_task(dag.get_task("task_3"))
ti.run()
assert len(ti_3s) == len(ti_2s) == len(upstream_return) |
`.partial` on an instance should fail -- it's only designed to be called on classes | def test_partial_on_instance() -> None:
"""`.partial` on an instance should fail -- it's only designed to be called on classes"""
with pytest.raises(TypeError):
MockOperator(task_id="a").partial() |
Test that passing invalid args to partial() fails.
I.e. if an arg is not known on the class or any of its parent classes, we error at parse time | def test_partial_on_class_invalid_ctor_args() -> None:
"""Test that when we pass invalid args to partial().
I.e. if an arg is not known on the class or any of its parent classes we error at parse time
"""
with pytest.raises(TypeError, match=r"arguments 'foo', 'bar'"):
MockOperator.partial(task_id="a", foo="bar", bar=2) |
This test tries to recreate a faulty state in the database and checks if we can recover from it.
The faulty state is that both the mapped task instances and the unmapped task instance exist.
So we have instances with map_index [-1, 0, 1]. The -1 task instances should be removed in this case. | def test_expand_mapped_task_failed_state_in_db(dag_maker, session):
"""
This test tries to recreate a faulty state in the database and checks if we can recover from it.
The faulty state is that both the mapped task instances and the unmapped task instance exist.
So we have instances with map_index [-1, 0, 1]. The -1 task instances should be removed in this case.
"""
literal = [1, 2]
with dag_maker(session=session):
task1 = BaseOperator(task_id="op1")
mapped = MockOperator.partial(task_id="task_2").expand(arg2=task1.output)
dr = dag_maker.create_dagrun()
session.add(
TaskMap(
dag_id=dr.dag_id,
task_id=task1.task_id,
run_id=dr.run_id,
map_index=-1,
length=len(literal),
keys=None,
)
)
for index in range(2):
# Give the existing TIs a state to make sure we don't change them
ti = TaskInstance(mapped, run_id=dr.run_id, map_index=index, state=TaskInstanceState.SUCCESS)
session.add(ti)
session.flush()
indices = (
session.query(TaskInstance.map_index, TaskInstance.state)
.filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)
.order_by(TaskInstance.map_index)
.all()
)
# Make sure we have the faulty state in the database
assert indices == [(-1, None), (0, "success"), (1, "success")]
mapped.expand_mapped_task(dr.run_id, session=session)
indices = (
session.query(TaskInstance.map_index, TaskInstance.state)
.filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)
.order_by(TaskInstance.map_index)
.all()
)
# The -1 index should be cleaned up
assert indices == [(0, "success"), (1, "success")] |
Test that mapped task instances render the expected map index names when expanded with a named index template | def test_expand_mapped_task_instance_with_named_index(
dag_maker,
session,
create_mapped_task,
template,
expected_rendered_names,
) -> None:
"""Test that the correct number of downstream tasks are generated when mapping with an XComArg"""
with dag_maker("test-dag", session=session, start_date=DEFAULT_DATE):
create_mapped_task(task_id="task1", map_names=["a", "b"], template=template)
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances()
for ti in tis:
ti.run()
session.flush()
indices = session.scalars(
select(TaskInstance.rendered_map_index)
.where(
TaskInstance.dag_id == "test-dag",
TaskInstance.task_id == "task1",
TaskInstance.run_id == dr.run_id,
)
.order_by(TaskInstance.map_index)
).all()
assert indices == expected_rendered_names |
Loads DAGs from a module for test. | def make_example_dags(module):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module.__path__[0])
return dagbag.dags |
Test that AirflowSensorTimeout does not cause sensor to retry. | def test_sensor_timeout(mode, retries, dag_maker):
"""
Test that AirflowSensorTimeout does not cause sensor to retry.
"""
def timeout():
raise AirflowSensorTimeout
mock_on_failure = mock.MagicMock()
mock_on_failure.__name__ = "mock_on_failure"
with dag_maker(dag_id=f"test_sensor_timeout_{mode}_{retries}"):
PythonSensor(
task_id="test_raise_sensor_timeout",
python_callable=timeout,
on_failure_callback=mock_on_failure,
retries=retries,
mode=mode,
)
ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
with pytest.raises(AirflowSensorTimeout):
ti.run()
assert mock_on_failure.called
assert ti.state == State.FAILED |
Test that AirflowSensorTimeout does not cause mapped sensor to retry. | def test_mapped_sensor_timeout(mode, retries, dag_maker):
"""
Test that AirflowSensorTimeout does not cause mapped sensor to retry.
"""
def timeout():
raise AirflowSensorTimeout
mock_on_failure = mock.MagicMock()
mock_on_failure.__name__ = "mock_on_failure"
with dag_maker(dag_id=f"test_sensor_timeout_{mode}_{retries}"):
PythonSensor.partial(
task_id="test_raise_sensor_timeout",
python_callable=timeout,
on_failure_callback=mock_on_failure,
retries=retries,
).expand(mode=[mode])
ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
with pytest.raises(AirflowSensorTimeout):
ti.run()
assert mock_on_failure.called
assert ti.state == State.FAILED |
Test that mapped sensors reach the success state. | def test_mapped_sensor_works(mode, retries, dag_maker):
"""
Test that mapped sensors reach the success state.
"""
def timeout(ti):
return 1
with dag_maker(dag_id=f"test_sensor_timeout_{mode}_{retries}"):
PythonSensor.partial(
task_id="test_raise_sensor_timeout",
python_callable=timeout,
retries=retries,
).expand(mode=[mode])
ti = dag_maker.create_dagrun().task_instances[0]
ti.run()
assert ti.state == State.SUCCESS |
Ensure we access XCom lazily when pulling from a mapped operator. | def test_ti_xcom_pull_on_mapped_operator_return_lazy_iterable(mock_deserialize_value, dag_maker, session):
"""Ensure we access XCom lazily when pulling from a mapped operator."""
with dag_maker(dag_id="test_xcom", session=session):
# Use the private _expand() method to avoid the empty kwargs check.
# We don't care about how the operator runs here, only its presence.
task_1 = EmptyOperator.partial(task_id="task_1")._expand(EXPAND_INPUT_EMPTY, strict=False)
EmptyOperator(task_id="task_2")
dagrun = dag_maker.create_dagrun()
ti_1_0 = dagrun.get_task_instance("task_1", session=session)
ti_1_0.map_index = 0
ti_1_1 = session.merge(TaskInstance(task_1, run_id=dagrun.run_id, map_index=1, state=ti_1_0.state))
session.flush()
ti_1_0.xcom_push(key=XCOM_RETURN_KEY, value="a", session=session)
ti_1_1.xcom_push(key=XCOM_RETURN_KEY, value="b", session=session)
ti_2 = dagrun.get_task_instance("task_2", session=session)
# Simply pulling the joined XCom value should not deserialize.
joined = ti_2.xcom_pull("task_1", session=session)
assert isinstance(joined, LazyXComAccess)
assert mock_deserialize_value.call_count == 0
# Only when we go through the iterable does deserialization happen.
it = iter(joined)
assert next(it) == "a"
assert mock_deserialize_value.call_count == 1
assert next(it) == "b"
assert mock_deserialize_value.call_count == 2
with pytest.raises(StopIteration):
next(it) |
Test expand on non-templated fields sets upstream deps properly. | def test_expand_non_templated_field(dag_maker, session):
"""Test expand on non-templated fields sets upstream deps properly."""
class SimpleBashOperator(BashOperator):
template_fields = ()
with dag_maker(dag_id="product_same_types", session=session) as dag:
@dag.task
def get_extra_env():
return [{"foo": "bar"}, {"foo": "biz"}]
SimpleBashOperator.partial(task_id="echo", bash_command="echo $FOO").expand(env=get_extra_env())
dag_maker.create_dagrun()
echo_task = dag.get_task("echo")
assert "get_extra_env" in echo_task.upstream_task_ids |
This tests that when scheduling child tasks of a task and there's a mapped downstream task,
if the mapped downstream task has upstreams that are not yet done, the mapped downstream task is
not marked as `upstream_failed` | def test_mapped_task_does_not_error_in_mini_scheduler_if_upstreams_are_not_done(dag_maker, caplog, session):
"""
This tests that when scheduling child tasks of a task and there's a mapped downstream task,
if the mapped downstream task has upstreams that are not yet done, the mapped downstream task is
not marked as `upstream_failed`
"""
with dag_maker() as dag:
@dag.task
def second_task():
return [0, 1, 2]
@dag.task
def first_task():
print(2)
@dag.task
def middle_task(id):
return id
middle = middle_task.expand(id=second_task())
@dag.task
def last_task():
print(3)
[first_task(), middle] >> last_task()
dag_run = dag_maker.create_dagrun()
first_ti = dag_run.get_task_instance(task_id="first_task")
second_ti = dag_run.get_task_instance(task_id="second_task")
first_ti.state = State.SUCCESS
second_ti.state = State.RUNNING
session.merge(first_ti)
session.merge(second_ti)
session.commit()
first_ti.schedule_downstream_tasks(session=session)
middle_ti = dag_run.get_task_instance(task_id="middle_task")
assert middle_ti.state != State.UPSTREAM_FAILED
assert "0 downstream tasks scheduled from follow-on schedule" in caplog.text |
This test verifies that operators with inherits_from_empty_operator are not considered by the mini scheduler.
Such operators should not run on workers, thus the mini scheduler optimization should skip them and not
submit them directly to the worker. | def test_empty_operator_is_not_considered_in_mini_scheduler(dag_maker, caplog, session):
"""
This test verifies that operators with inherits_from_empty_operator are not considered by the mini scheduler.
Such operators should not run on workers, thus the mini scheduler optimization should skip them and not
submit them directly to the worker.
"""
with dag_maker() as dag:
@dag.task
def first_task():
print(2)
@dag.task
def second_task():
print(2)
third_task = EmptyOperator(task_id="third_task")
forth_task = EmptyOperator(task_id="forth_task", on_success_callback=lambda x: print("hi"))
first_task() >> [second_task(), third_task, forth_task]
dag_run = dag_maker.create_dagrun()
first_ti = dag_run.get_task_instance(task_id="first_task")
second_ti = dag_run.get_task_instance(task_id="second_task")
third_ti = dag_run.get_task_instance(task_id="third_task")
forth_ti = dag_run.get_task_instance(task_id="forth_task")
first_ti.state = State.SUCCESS
second_ti.state = State.NONE
third_ti.state = State.NONE
forth_ti.state = State.NONE
session.merge(first_ti)
session.merge(second_ti)
session.merge(third_ti)
session.merge(forth_ti)
session.commit()
first_ti.schedule_downstream_tasks(session=session)
second_task = dag_run.get_task_instance(task_id="second_task")
third_task = dag_run.get_task_instance(task_id="third_task")
forth_task = dag_run.get_task_instance(task_id="forth_task")
assert second_task.state == State.SCHEDULED
assert third_task.state == State.NONE
assert forth_task.state == State.SCHEDULED
assert "2 downstream tasks scheduled from follow-on schedule" in caplog.text |
Test that mini scheduler expands mapped task | def test_mapped_task_expands_in_mini_scheduler_if_upstreams_are_done(dag_maker, caplog, session):
"""Test that mini scheduler expands mapped task"""
with dag_maker() as dag:
@dag.task
def second_task():
return [0, 1, 2]
@dag.task
def first_task():
print(2)
@dag.task
def middle_task(id):
return id
middle = middle_task.expand(id=second_task())
@dag.task
def last_task():
print(3)
[first_task(), middle] >> last_task()
dr = dag_maker.create_dagrun()
first_ti = dr.get_task_instance(task_id="first_task")
first_ti.state = State.SUCCESS
session.merge(first_ti)
session.commit()
second_task = dag.get_task("second_task")
second_ti = dr.get_task_instance(task_id="second_task")
second_ti.refresh_from_task(second_task)
second_ti.run()
second_ti.schedule_downstream_tasks(session=session)
for i in range(3):
middle_ti = dr.get_task_instance(task_id="middle_task", map_index=i)
assert middle_ti.state == State.SCHEDULED
assert "3 downstream tasks scheduled from follow-on schedule" in caplog.text |
Check that as_teardown works properly as implemented in PlainXComArg
It should mark the teardown as teardown, and if a task is provided, it should mark that as setup
and set it as a direct upstream. | def test_as_teardown(dag_maker, setup_type, work_type, teardown_type):
"""
Check that as_teardown works properly as implemented in PlainXComArg
It should mark the teardown as teardown, and if a task is provided, it should mark that as setup
and set it as a direct upstream.
"""
with dag_maker() as dag:
s1 = make_task(name="s1", type_=setup_type)
w1 = make_task(name="w1", type_=work_type)
t1 = make_task(name="t1", type_=teardown_type)
# initial conditions
assert cleared_tasks(dag, "w1") == {"w1"}
# after setting deps, still none are setup / teardown
# verify relationships
s1 >> w1 >> t1
assert cleared_tasks(dag, "w1") == {"w1", "t1"}
assert get_task_attr(t1, "is_teardown") is False
assert get_task_attr(s1, "is_setup") is False
assert get_task_attr(t1, "upstream_task_ids") == {"w1"}
# now when we use as_teardown, s1 should be setup, t1 should be teardown, and we should have s1 >> t1
t1.as_teardown(setups=s1)
assert cleared_tasks(dag, "w1") == {"s1", "w1", "t1"}
assert get_task_attr(t1, "is_teardown") is True
assert get_task_attr(s1, "is_setup") is True
assert get_task_attr(t1, "upstream_task_ids") == {"w1", "s1"} |
Check that as_teardown implementations work properly. Tests all combinations of taskflow and classic.
It should mark the teardown as teardown, and if a task is provided, it should mark that as setup
and set it as a direct upstream. | def test_as_teardown_oneline(dag_maker, setup_type, work_type, teardown_type):
"""
Check that as_teardown implementations work properly. Tests all combinations of taskflow and classic.
It should mark the teardown as teardown, and if a task is provided, it should mark that as setup
and set it as a direct upstream.
"""
with dag_maker() as dag:
s1 = make_task(name="s1", type_=setup_type)
w1 = make_task(name="w1", type_=work_type)
t1 = make_task(name="t1", type_=teardown_type)
# verify initial conditions
for task_ in (s1, w1, t1):
assert get_task_attr(task_, "upstream_list") == []
assert get_task_attr(task_, "downstream_list") == []
assert get_task_attr(task_, "is_setup") is False
assert get_task_attr(task_, "is_teardown") is False
assert cleared_tasks(dag, get_task_attr(task_, "task_id")) == {get_task_attr(task_, "task_id")}
# now set the deps in one line
s1 >> w1 >> t1.as_teardown(setups=s1)
# verify resulting configuration
# should be equiv to the following:
# * s1.is_setup = True
# * t1.is_teardown = True
# * s1 >> t1
# * s1 >> w1 >> t1
for task_, exp_up, exp_down in [
(s1, set(), {"w1", "t1"}),
(w1, {"s1"}, {"t1"}),
(t1, {"s1", "w1"}, set()),
]:
assert get_task_attr(task_, "upstream_task_ids") == exp_up
assert get_task_attr(task_, "downstream_task_ids") == exp_down
assert cleared_tasks(dag, "s1") == {"s1", "w1", "t1"}
assert cleared_tasks(dag, "w1") == {"s1", "w1", "t1"}
assert cleared_tasks(dag, "t1") == {"t1"}
for task_, exp_is_setup, exp_is_teardown in [
(s1, True, False),
(w1, False, False),
(t1, False, True),
]:
assert get_task_attr(task_, "is_setup") is exp_is_setup
assert get_task_attr(task_, "is_teardown") is exp_is_teardown |
Fixture that provides a SQLAlchemy session | def session():
"""Fixture that provides a SQLAlchemy session"""
with create_session() as session:
yield session |
Tests that unused triggers (those with no task instances referencing them)
are cleaned out automatically. | def test_clean_unused(session, create_task_instance):
"""
Tests that unused triggers (those with no task instances referencing them)
are cleaned out automatically.
"""
# Make three triggers
trigger1 = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger1.id = 1
trigger2 = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger2.id = 2
trigger3 = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger3.id = 3
session.add(trigger1)
session.add(trigger2)
session.add(trigger3)
session.commit()
assert session.query(Trigger).count() == 3
# Tie one to a fake TaskInstance that is not deferred, and one to one that is
task_instance = create_task_instance(
session=session, task_id="fake", state=State.DEFERRED, execution_date=timezone.utcnow()
)
task_instance.trigger_id = trigger1.id
session.add(task_instance)
fake_task = EmptyOperator(task_id="fake2", dag=task_instance.task.dag)
task_instance = TaskInstance(task=fake_task, run_id=task_instance.run_id)
task_instance.state = State.SUCCESS
task_instance.trigger_id = trigger2.id
session.add(task_instance)
session.commit()
# Run the cleanup operation
Trigger.clean_unused()
# Verify that the two unused triggers are gone, and the right one is left
assert session.query(Trigger).one().id == trigger1.id |
Tests that events submitted to a trigger re-wake their dependent
task instances. | def test_submit_event(session, create_task_instance):
"""
Tests that events submitted to a trigger re-wake their dependent
task instances.
"""
# Make a trigger
trigger = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger.id = 1
session.add(trigger)
session.commit()
# Make a TaskInstance that's deferred and waiting on it
task_instance = create_task_instance(
session=session, execution_date=timezone.utcnow(), state=State.DEFERRED
)
task_instance.trigger_id = trigger.id
task_instance.next_kwargs = {"cheesecake": True}
session.commit()
# Call submit_event
Trigger.submit_event(trigger.id, TriggerEvent(42), session=session)
# commit changes made by submit event and expire all cache to read from db.
session.flush()
session.expunge_all()
# Check that the task instance is now scheduled
updated_task_instance = session.query(TaskInstance).one()
assert updated_task_instance.state == State.SCHEDULED
assert updated_task_instance.next_kwargs == {"event": 42, "cheesecake": True} |
Tests that failures submitted to a trigger fail their dependent
task instances. | def test_submit_failure(session, create_task_instance):
"""
Tests that failures submitted to a trigger fail their dependent
task instances.
"""
# Make a trigger
trigger = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger.id = 1
session.add(trigger)
session.commit()
# Make a TaskInstance that's deferred and waiting on it
task_instance = create_task_instance(
task_id="fake", execution_date=timezone.utcnow(), state=State.DEFERRED
)
task_instance.trigger_id = trigger.id
session.commit()
# Call submit_failure
Trigger.submit_failure(trigger.id, session=session)
# Check that the task instance is now scheduled to fail
updated_task_instance = session.query(TaskInstance).one()
assert updated_task_instance.state == State.SCHEDULED
assert updated_task_instance.next_method == "__fail__" |
Tests that unassigned triggers of all appropriate states are assigned. | def test_assign_unassigned(session, create_task_instance):
"""
Tests that unassigned triggers of all appropriate states are assigned.
"""
time_now = timezone.utcnow()
triggerer_heartrate = 10
finished_triggerer = Job(heartrate=triggerer_heartrate, state=State.SUCCESS)
TriggererJobRunner(finished_triggerer)
finished_triggerer.end_date = time_now - datetime.timedelta(hours=1)
session.add(finished_triggerer)
assert not finished_triggerer.is_alive()
healthy_triggerer = Job(heartrate=triggerer_heartrate, state=State.RUNNING)
TriggererJobRunner(healthy_triggerer)
session.add(healthy_triggerer)
assert healthy_triggerer.is_alive()
new_triggerer = Job(heartrate=triggerer_heartrate, state=State.RUNNING)
TriggererJobRunner(new_triggerer)
session.add(new_triggerer)
assert new_triggerer.is_alive()
# This trigger's last heartbeat is older than the check threshold, expect
# its triggers to be taken by other healthy triggerers below
unhealthy_triggerer = Job(
heartrate=triggerer_heartrate,
state=State.RUNNING,
latest_heartbeat=time_now - datetime.timedelta(seconds=100),
)
TriggererJobRunner(unhealthy_triggerer)
session.add(unhealthy_triggerer)
# Triggerer is not healthy, its last heartbeat was too long ago
assert not unhealthy_triggerer.is_alive()
session.commit()
trigger_on_healthy_triggerer = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger_on_healthy_triggerer.id = 1
trigger_on_healthy_triggerer.triggerer_id = healthy_triggerer.id
session.add(trigger_on_healthy_triggerer)
ti_trigger_on_healthy_triggerer = create_task_instance(
task_id="ti_trigger_on_healthy_triggerer",
execution_date=time_now,
run_id="trigger_on_healthy_triggerer_run_id",
)
ti_trigger_on_healthy_triggerer.trigger_id = trigger_on_healthy_triggerer.id
session.add(ti_trigger_on_healthy_triggerer)
trigger_on_unhealthy_triggerer = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger_on_unhealthy_triggerer.id = 2
trigger_on_unhealthy_triggerer.triggerer_id = unhealthy_triggerer.id
session.add(trigger_on_unhealthy_triggerer)
ti_trigger_on_unhealthy_triggerer = create_task_instance(
task_id="ti_trigger_on_unhealthy_triggerer",
execution_date=time_now + datetime.timedelta(hours=1),
run_id="trigger_on_unhealthy_triggerer_run_id",
)
ti_trigger_on_unhealthy_triggerer.trigger_id = trigger_on_unhealthy_triggerer.id
session.add(ti_trigger_on_unhealthy_triggerer)
trigger_on_killed_triggerer = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger_on_killed_triggerer.id = 3
trigger_on_killed_triggerer.triggerer_id = finished_triggerer.id
session.add(trigger_on_killed_triggerer)
ti_trigger_on_killed_triggerer = create_task_instance(
task_id="ti_trigger_on_killed_triggerer",
execution_date=time_now + datetime.timedelta(hours=2),
run_id="trigger_on_killed_triggerer_run_id",
)
ti_trigger_on_killed_triggerer.trigger_id = trigger_on_killed_triggerer.id
session.add(ti_trigger_on_killed_triggerer)
trigger_unassigned_to_triggerer = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
trigger_unassigned_to_triggerer.id = 4
session.add(trigger_unassigned_to_triggerer)
ti_trigger_unassigned_to_triggerer = create_task_instance(
task_id="ti_trigger_unassigned_to_triggerer",
execution_date=time_now + datetime.timedelta(hours=3),
run_id="trigger_unassigned_to_triggerer_run_id",
)
ti_trigger_unassigned_to_triggerer.trigger_id = trigger_unassigned_to_triggerer.id
session.add(ti_trigger_unassigned_to_triggerer)
assert trigger_unassigned_to_triggerer.triggerer_id is None
session.commit()
assert session.query(Trigger).count() == 4
Trigger.assign_unassigned(new_triggerer.id, 100, health_check_threshold=30)
session.expire_all()
# Check that trigger on killed triggerer and unassigned trigger are assigned to new triggerer
assert (
session.query(Trigger).filter(Trigger.id == trigger_on_killed_triggerer.id).one().triggerer_id
== new_triggerer.id
)
assert (
session.query(Trigger).filter(Trigger.id == trigger_unassigned_to_triggerer.id).one().triggerer_id
== new_triggerer.id
)
# Check that trigger on healthy triggerer still assigned to existing triggerer
assert (
session.query(Trigger).filter(Trigger.id == trigger_on_healthy_triggerer.id).one().triggerer_id
== healthy_triggerer.id
)
# Check that trigger on unhealthy triggerer is assigned to new triggerer
assert (
session.query(Trigger).filter(Trigger.id == trigger_on_unhealthy_triggerer.id).one().triggerer_id
== new_triggerer.id
) |
Tests that triggers are sorted by the creation_date if they have the same priority. | def test_get_sorted_triggers_same_priority_weight(session, create_task_instance):
"""
Tests that triggers are sorted by the creation_date if they have the same priority.
"""
old_execution_date = datetime.datetime(
2023, 5, 9, 12, 16, 14, 474415, tzinfo=pytz.timezone("Africa/Abidjan")
)
trigger_old = Trigger(
classpath="airflow.triggers.testing.SuccessTrigger",
kwargs={},
created_date=old_execution_date + datetime.timedelta(seconds=30),
)
trigger_old.id = 1
session.add(trigger_old)
TI_old = create_task_instance(
task_id="old",
execution_date=old_execution_date,
run_id="old_run_id",
)
TI_old.priority_weight = 1
TI_old.trigger_id = trigger_old.id
session.add(TI_old)
new_execution_date = datetime.datetime(
2023, 5, 9, 12, 17, 14, 474415, tzinfo=pytz.timezone("Africa/Abidjan")
)
trigger_new = Trigger(
classpath="airflow.triggers.testing.SuccessTrigger",
kwargs={},
created_date=new_execution_date + datetime.timedelta(seconds=30),
)
trigger_new.id = 2
session.add(trigger_new)
TI_new = create_task_instance(
task_id="new",
execution_date=new_execution_date,
run_id="new_run_id",
)
TI_new.priority_weight = 1
TI_new.trigger_id = trigger_new.id
session.add(TI_new)
session.commit()
assert session.query(Trigger).count() == 2
trigger_ids_query = Trigger.get_sorted_triggers(capacity=100, alive_triggerer_ids=[], session=session)
assert trigger_ids_query == [(1,), (2,)] |
Tests that triggers are sorted by the priority_weight. | def test_get_sorted_triggers_different_priority_weights(session, create_task_instance):
"""
Tests that triggers are sorted by the priority_weight.
"""
old_execution_date = datetime.datetime(
2023, 5, 9, 12, 16, 14, 474415, tzinfo=pytz.timezone("Africa/Abidjan")
)
trigger_old = Trigger(
classpath="airflow.triggers.testing.SuccessTrigger",
kwargs={},
created_date=old_execution_date + datetime.timedelta(seconds=30),
)
trigger_old.id = 1
session.add(trigger_old)
TI_old = create_task_instance(
task_id="old",
execution_date=old_execution_date,
run_id="old_run_id",
)
TI_old.priority_weight = 1
TI_old.trigger_id = trigger_old.id
session.add(TI_old)
new_execution_date = datetime.datetime(
2023, 5, 9, 12, 17, 14, 474415, tzinfo=pytz.timezone("Africa/Abidjan")
)
trigger_new = Trigger(
classpath="airflow.triggers.testing.SuccessTrigger",
kwargs={},
created_date=new_execution_date + datetime.timedelta(seconds=30),
)
trigger_new.id = 2
session.add(trigger_new)
TI_new = create_task_instance(
task_id="new",
execution_date=new_execution_date,
run_id="new_run_id",
)
TI_new.priority_weight = 2
TI_new.trigger_id = trigger_new.id
session.add(TI_new)
session.commit()
assert session.query(Trigger).count() == 2
trigger_ids_query = Trigger.get_sorted_triggers(capacity=100, alive_triggerer_ids=[], session=session)
assert trigger_ids_query == [(2,), (1,)] |
Tests that sensitive kwargs are encrypted. | def test_serialize_sensitive_kwargs():
"""
Tests that sensitive kwargs are encrypted.
"""
trigger_instance = SensitiveKwargsTrigger(param1="value1", param2="value2")
trigger_row: Trigger = Trigger.from_object(trigger_instance)
assert trigger_row.kwargs["param1"] == "value1"
assert trigger_row.kwargs["param2"] == "value2"
assert isinstance(trigger_row.encrypted_kwargs, str)
assert "value1" not in trigger_row.encrypted_kwargs
assert "value2" not in trigger_row.encrypted_kwargs |
Tests that we don't decrypt kwargs if they aren't encrypted.
We weren't able to encrypt the kwargs in all migration paths. | def test_kwargs_not_encrypted():
"""
Tests that we don't decrypt kwargs if they aren't encrypted.
We weren't able to encrypt the kwargs in all migration paths.
"""
trigger = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
# force the `encrypted_kwargs` to be unencrypted, like they would be after an offline upgrade
trigger.encrypted_kwargs = json.dumps(
BaseSerialization.serialize({"param1": "value1", "param2": "value2"})
)
assert trigger.kwargs["param1"] == "value1"
assert trigger.kwargs["param2"] == "value2" |
Reset XCom entries. | def reset_db():
"""Reset XCom entries."""
with create_session() as session:
session.query(DagRun).delete()
session.query(XCom).delete() |
Create all directories and files that should be ignored, and set the base path. | def populate_dir(root_path):
"""
Create all directories and files that should be ignored, and set the base path.
"""
# Temp dir name includes an ignored token "not", but it shouldn't matter since it's in the base path.
test_dir = root_path / "onotole"
plugin_folder_path = test_dir / "test_ignore"
plugin_folder_path.mkdir(parents=True)
for name in ("subdir1", "subdir2", "subdir3"):
(plugin_folder_path / name).mkdir()
files_content = [
["test_load.py", "#Should not be ignored file"],
["test_notload.py", 'raise Exception("This file should have been ignored!")'],
[".airflowignore", "#ignore test\nnot\nsubdir2"],
[".airflowignore_glob", "#ignore test\n**/*not*\nsubdir2/"],
["subdir1/.airflowignore", "#ignore test\nnone\n_glob"],
["subdir1/.airflowignore_glob", "#ignore test\n*none*"],
["subdir1/test_load_sub1.py", "#Should not be ignored file"],
["test_notload_sub.py", 'raise Exception("This file should have been ignored!")'],
["subdir1/test_noneload_sub1.py", 'raise Exception("This file should have been ignored!")'],
["subdir2/test_shouldignore.py", 'raise Exception("This file should have been ignored!")'],
["subdir3/test_notload_sub3.py", 'raise Exception("This file should have been ignored!")'],
]
for file_path, content in files_content:
(plugin_folder_path / file_path).write_text(content)
patch.object(settings, "PLUGINS_FOLDER", return_value=plugin_folder_path)
return plugin_folder_path |
Patch ``BaseHook.get_connection()`` with a mock value.
This fixture is optionally parametrized; if ``param`` is not set or empty it just mocks the method.
If param is a dictionary or :class:`~airflow.models.Connection` then return it,
If param is an exception then add it as a side effect.
Otherwise, it raises an error | def hook_conn(request):
"""
Patch ``BaseHook.get_connection()`` with a mock value.
This fixture is optionally parametrized; if ``param`` is not set or empty it just mocks the method.
If param is a dictionary or :class:`~airflow.models.Connection` then return it,
If param is an exception then add it as a side effect.
Otherwise, it raises an error
"""
try:
conn = request.param
except AttributeError:
conn = None
with mock.patch("airflow.hooks.base.BaseHook.get_connection") as m:
if not conn:
pass # Don't do anything if param not specified or empty
elif isinstance(conn, dict):
m.return_value = Connection(**conn)
elif isinstance(conn, Connection):
m.return_value = conn
elif isinstance(conn, Exception):
m.side_effect = conn
else:
raise TypeError(
f"{request.node.name!r}: expected dict, Connection object or Exception, "
f"but got {type(conn).__name__}"
)
yield m |
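# A minimal usage sketch of the fixture above (hypothetical test, not from the
# suite): pytest's indirect parametrization is what populates ``request.param``.
import pytest

from airflow.models import Connection


@pytest.mark.parametrize(
    "hook_conn",
    [
        {"conn_id": "test_conn", "host": "example.com"},  # a dict is built into a Connection
        Connection(conn_id="test_conn", host="example.com"),  # a Connection is returned as-is
    ],
    indirect=True,  # route the parameter into the fixture via request.param
)
def test_uses_mocked_connection(hook_conn):
    # BaseHook.get_connection() is patched, so any hook created in this test
    # sees the parametrized connection instead of hitting the metadata DB.
    assert hook_conn.return_value.host == "example.com"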
Function decorator that skips this test function if no valid connection id is specified. | def skip_test_if_no_valid_conn_id(func: T) -> T:
"""
Function decorator that skips this test function if no valid connection id is specified.
"""
function_signature = signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> None:
bound_args = function_signature.bind(*args, **kwargs)
self = args[0]
if self.hook is not None:
return func(*bound_args.args, **bound_args.kwargs)
else:
return None
return cast(T, wrapper) |
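# A hypothetical sketch of how the decorator above is typically applied; the
# class, attribute and method names here are assumptions for illustration only.
class TestAgainstRealService:
    # In a real suite ``hook`` would be created from an environment-provided
    # connection id in a setup method, and left as None when not configured.
    hook = None

    @skip_test_if_no_valid_conn_id
    def test_round_trip(self):
        # Only executes when self.hook was successfully created; otherwise the
        # wrapper returns None without running the body.
        assert self.hook.test_connection()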
Filter known botocore future warnings. | def filter_botocore_warnings(botocore_version):
"""Filter known botocore future warnings."""
with warnings.catch_warnings():
if botocore_version and botocore_version < (1, 29):
# By default, for some clients botocore use deprecated endpoints `{region}.{service}.{dnsSuffix}`
# In botocore 1.29 it will be replaced by `{service}.{region}.{dnsSuffix}`
# and the warning should gone
# See: https://github.com/boto/botocore/issues/2705
warnings.filterwarnings(
"ignore",
category=FutureWarning,
module="botocore.client",
message="The .* client is currently using a deprecated endpoint.*",
)
yield |
Session scoped fixture, return mock AWS specific environment variables for unit tests. | def aws_testing_env_vars(tmp_path_factory):
"""Session scoped fixture, return mock AWS specific environment variables for unit tests."""
tmp_dir = tmp_path_factory.mktemp("aws-configs-")
def empty_config(name: str) -> str:
config = tmp_dir / name
config.touch()
return str(config)
return {
# Mock values for access_key, secret_key and token
"AWS_ACCESS_KEY_ID": "airflow-testing",
"AWS_SECRET_ACCESS_KEY": "airflow-testing",
"AWS_SESSION_TOKEN": "airflow-testing",
"AWS_SECURITY_TOKEN": "airflow-testing",
# Set default region as N. Virginia (us-east-1).
# Otherwise some unit tests might fail if this is set to another region.
"AWS_DEFAULT_REGION": "us-east-1",
"AWS_REGION": "us-east-1",
# Create empty configuration file
"AWS_SHARED_CREDENTIALS_FILE": empty_config("aws_shared_credentials_file"),
"AWS_CONFIG_FILE": empty_config("aws_config_file"),
"BOTO_CONFIG": empty_config("legacy_boto2_config.cfg"),
} |
Change AWS configurations (env vars) before starting each test.
1. Remove all existing variables prefixed with AWS.
These might be credentials, botocore configurations, etc.
2. Use pre-defined variables for unit testing. | def set_default_aws_settings(aws_testing_env_vars, monkeypatch):
"""
Change AWS configurations (env vars) before starting each test.
1. Remove all existing variables prefixed with AWS.
These might be credentials, botocore configurations, etc.
2. Use pre-defined variables for unit testing.
"""
for env_name in os.environ:
if env_name.startswith("AWS"):
monkeypatch.delenv(env_name, raising=False)
for env_name, value in aws_testing_env_vars.items():
monkeypatch.setenv(env_name, value) |
Mock Batch Executor to a repeatable starting state. | def mock_executor(set_env_vars) -> AwsBatchExecutor:
"""Mock Batch Executor to a repeatable starting state."""
executor = AwsBatchExecutor()
executor.IS_BOTO_CONNECTION_HEALTHY = True
# Replace boto3 Batch client with mock.
batch_mock = mock.Mock(spec=executor.batch)
submit_job_ret_val = {"tasks": [{"taskArn": ARN1}], "failures": []}
batch_mock.submit_job.return_value = submit_job_ret_val
executor.batch = batch_mock
return executor |
Mock ECS to a repeatable starting state. | def mock_executor(set_env_vars) -> AwsEcsExecutor:
"""Mock ECS to a repeatable starting state.."""
executor = AwsEcsExecutor()
executor.IS_BOTO_CONNECTION_HEALTHY = True
# Replace boto3 ECS client with mock.
ecs_mock = mock.Mock(spec=executor.ecs)
run_task_ret_val = {"tasks": [{"taskArn": ARN1}], "failures": []}
ecs_mock.run_task.return_value = run_task_ret_val
executor.ecs = ecs_mock
return executor |
Test that an error is raised if no credentials are provided and the default boto3 strategy is unable to get creds. | def test_raise_no_creds_default_credentials_strategy(tmp_path_factory, monkeypatch):
"""Test raise an error if no credentials provided and default boto3 strategy unable to get creds."""
for env_key in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "AWS_SECURITY_TOKEN"):
# Delete aws credentials environment variables
monkeypatch.delenv(env_key, raising=False)
hook = AwsBaseHook(aws_conn_id=None, client_type="sts")
with pytest.raises(NoCredentialsError) as credential_error:
# Call AWS STS API method GetCallerIdentity
# which should return result in case of valid credentials
hook.conn.get_caller_identity()
assert str(credential_error.value) == "Unable to locate credentials" |
Patch hook object with a dummy boto3 Batch client. | def patch_hook(monkeypatch, aws_region):
"""Patch hook object with a dummy boto3 Batch client."""
batch_client = boto3.client("batch", region_name=aws_region)
monkeypatch.setattr(BatchWaitersHook, "conn", batch_client) |
Patch hook object with a dummy boto3 ECR client. | def patch_hook(monkeypatch):
"""Patch hook object with a dummy boto3 ECR client."""
ecr_client = boto3.client("ecr")
monkeypatch.setattr(EcrHook, "conn", ecr_client) |
A fixture to generate a batch of EKS Clusters on the mocked backend for testing. | def cluster_builder():
"""A fixture to generate a batch of EKS Clusters on the mocked backend for testing."""
class ClusterTestDataFactory:
"""A Factory class for building the Cluster objects."""
def __init__(self, count: int, minimal: bool) -> None:
# Generate 'count' number of Cluster objects.
self.cluster_names: list[str] = generate_clusters(
eks_hook=eks_hook, num_clusters=count, minimal=minimal
)
self.existing_cluster_name: str = self.cluster_names[0]
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_cluster() for the first Cluster.
self.cluster_describe_output: dict = eks_hook.describe_cluster(name=self.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
# Generate a list of the Cluster attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=ClusterInputs, cluster_name=self.existing_cluster_name
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, ClusterTestDataFactory]:
return eks_hook, ClusterTestDataFactory(count=count, minimal=minimal)
with mock_aws():
eks_hook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
yield _execute |
A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing. | def fargate_profile_builder(cluster_builder):
"""A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing."""
class FargateProfileTestDataFactory:
"""A Factory class for building the Fargate profile objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name = cluster.existing_cluster_name
# Generate 'count' number of FargateProfile objects.
self.fargate_profile_names = generate_fargate_profiles(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_profiles=count,
minimal=minimal,
)
# Get the name of the first generated profile.
self.existing_fargate_profile_name: str = self.fargate_profile_names[0]
self.nonexistent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_fargate_profiles() for the first profile.
self.fargate_describe_output: dict = eks_hook.describe_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.existing_fargate_profile_name
)[ResponseAttributes.FARGATE_PROFILE]
# Generate a list of the Fargate Profile attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=FargateProfileInputs,
cluster_name=self.cluster_name,
fargate_profile_name=self.existing_fargate_profile_name,
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, FargateProfileTestDataFactory]:
return eks_hook, FargateProfileTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute |
A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing. | def nodegroup_builder(cluster_builder):
"""A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing."""
class NodegroupTestDataFactory:
"""A Factory class for building the Nodegroup objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name: str = cluster.existing_cluster_name
# Generate 'count' number of Nodegroup objects.
self.nodegroup_names: list[str] = generate_nodegroups(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_nodegroups=count,
minimal=minimal,
)
# Get the name of the first generated Nodegroup.
self.existing_nodegroup_name: str = self.nodegroup_names[0]
self.nonexistent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_nodegroup() for the first Nodegroup.
self.nodegroup_describe_output: dict = eks_hook.describe_nodegroup(
clusterName=self.cluster_name, nodegroupName=self.existing_nodegroup_name
)[ResponseAttributes.NODEGROUP]
# Generate a list of the Nodegroup attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=NodegroupInputs,
cluster_name=self.cluster_name,
nodegroup_name=self.existing_nodegroup_name,
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, NodegroupTestDataFactory]:
return eks_hook, NodegroupTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute |
Applies regex `pattern` to `arn_under_test` and asserts
that each group matches the provided expected value.
A list entry of None in the 'expected_arn_values' will
assert that the value exists but will not match it against a specific value. | def assert_all_arn_values_are_valid(expected_arn_values, pattern, arn_under_test) -> None:
"""
Applies regex `pattern` to `arn_under_test` and asserts
that each group matches the provided expected value.
A list entry of None in the 'expected_arn_values' will
assert that the value exists but will not match it against a specific value.
"""
findall: list = pattern.findall(arn_under_test)[0]
# findall() returns a list of matches from right to left so it must be reversed
# in order to match the logical order of the 'expected_arn_values' list.
for value in reversed(findall):
expected_value = expected_arn_values.pop()
if expected_value:
assert value in expected_value
else:
assert value
assert region_matches_partition(findall[1], findall[0]) |
Asserts that the raised exception is of the expected type
and the resulting message matches the expected format. | def assert_client_error_exception_thrown(
expected_exception: type[AWSError], expected_msg: str, raised_exception: pytest.ExceptionInfo
) -> None:
"""
Asserts that the raised exception is of the expected type
and the resulting message matches the expected format.
"""
response = raised_exception.value.response[ErrorAttributes.ERROR]
assert response[ErrorAttributes.CODE] == expected_exception.TYPE
assert response[ErrorAttributes.MESSAGE] == expected_msg |
Parse Amazon Provider metadata, find all hooks based on `AwsGenericHook`, and return them. | def get_aws_hooks_modules():
"""Parse Amazon Provider metadata, find all hooks based on `AwsGenericHook`, and return them."""
hooks_dir = Path(__file__).absolute().parents[5] / "airflow" / "providers" / "amazon" / "aws" / "hooks"
if not hooks_dir.exists():
msg = f"Amazon Provider hooks directory not found: {hooks_dir.__fspath__()!r}"
raise FileNotFoundError(msg)
elif not hooks_dir.is_dir():
raise NotADirectoryError(hooks_dir.__fspath__())
for module in sorted(hooks_dir.glob("*.py")):
name = module.stem
if name.startswith("_"):
continue
module_string = f"airflow.providers.amazon.aws.hooks.{name}"
yield pytest.param(module_string, id=name) |
Test Amazon provider Hooks' signatures.
All hooks should provide a thin wrapper around boto3 / aiobotocore,
which means we should not define additional parameters in the Hook's constructor.
They should be defined in the appropriate methods.
.. code-block:: python
# Bad: Thick wrapper
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class AwsServiceName(AwsBaseHook):
def __init__(self, foo: str, spam: str, *args, **kwargs) -> None:
kwargs.update(dict(client_type="service", resource_type=None))
super().__init__(*args, **kwargs)
self.foo = foo
self.spam = spam
def method1(self):
if self.foo == "bar":
...
def method2(self):
if self.spam == "egg":
...
.. code-block:: python
# Good: Thin wrapper
class AwsServiceName(AwsBaseHook):
def __init__(self, *args, **kwargs) -> None:
kwargs.update(dict(client_type="service", resource_type=None))
super().__init__(*args, **kwargs)
def method1(self, foo: str):
if foo == "bar":
...
def method2(self, spam: str):
if spam == "egg":
... | def test_expected_thin_hooks(hook_module: str):
"""
Test Amazon provider Hooks' signatures.
All hooks should provide a thin wrapper around boto3 / aiobotocore,
which means we should not define additional parameters in the Hook's constructor.
They should be defined in the appropriate methods.
.. code-block:: python
# Bad: Thick wrapper
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class AwsServiceName(AwsBaseHook):
def __init__(self, foo: str, spam: str, *args, **kwargs) -> None:
kwargs.update(dict(client_type="service", resource_type=None))
super().__init__(*args, **kwargs)
self.foo = foo
self.spam = spam
def method1(self):
if self.foo == "bar":
...
def method2(self):
if self.spam == "egg":
...
.. code-block:: python
# Good: Thin wrapper
class AwsServiceName(AwsBaseHook):
def __init__(self, *args, **kwargs) -> None:
kwargs.update(dict(client_type="service", resource_type=None))
super().__init__(*args, **kwargs)
def method1(self, foo: str):
if foo == "bar":
...
def method2(self, spam: str):
if spam == "egg":
...
"""
hooks = get_aws_hooks_from_module(hook_module)
if not hooks:
pytest.skip(reason=f"Module {hook_module!r} doesn't contain subclasses of `AwsGenericHook`.")
errors = [
message
for valid, message in (validate_hook(hook, hook_name, hook_module) for hook, hook_name in hooks)
if not valid and message
]
if errors:
errors_msg = "\n * ".join(errors)
pytest.fail(reason=f"Found errors in {hook_module}:\n * {errors_msg}") |
Returns a NeptuneHook mocked with moto | def neptune_hook() -> Generator[NeptuneHook, None, None]:
"""Returns a NeptuneHook mocked with moto"""
with mock_aws():
yield NeptuneHook(aws_conn_id="aws_default") |
Returns Neptune cluster ID | def neptune_cluster_id(neptune_hook: NeptuneHook) -> str:
"""Returns Neptune cluster ID"""
resp = neptune_hook.conn.create_db_cluster(
DBClusterIdentifier="test-cluster",
Engine="neptune",
)
return resp["DBCluster"]["DBClusterIdentifier"] |
Returns an RdsHook whose underlying connection is mocked with moto | def rds_hook() -> Generator[RdsHook, None, None]:
"""Returns an RdsHook whose underlying connection is mocked with moto"""
with mock_aws():
yield RdsHook(aws_conn_id="aws_default", region_name="us-east-1") |
Creates an RDS DB instance and returns its id | def db_instance_id(rds_hook: RdsHook) -> str:
"""Creates an RDS DB instance and returns its id"""
response = rds_hook.conn.create_db_instance(
DBInstanceIdentifier="testrdshook-db-instance",
DBInstanceClass="db.t4g.micro",
Engine="postgres",
AllocatedStorage=20,
MasterUsername="testrdshook",
MasterUserPassword="testrdshook",
)
return response["DBInstance"]["DBInstanceIdentifier"] |
Creates an RDS DB cluster and returns its id | def db_cluster_id(rds_hook: RdsHook) -> str:
"""Creates an RDS DB cluster and returns its id"""
response = rds_hook.conn.create_db_cluster(
DBClusterIdentifier="testrdshook-db-cluster",
Engine="postgres",
MasterUsername="testrdshook",
MasterUserPassword="testrdshook",
DBClusterInstanceClass="db.t4g.micro",
AllocatedStorage=20,
)
return response["DBCluster"]["DBClusterIdentifier"] |
Creates a mock DB instance snapshot and returns the DBSnapshot dict from the boto response object.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_snapshot | def db_snapshot(rds_hook: RdsHook, db_instance_id: str) -> DBSnapshotTypeDef:
"""
Creates a mock DB instance snapshot and returns the DBSnapshot dict from the boto response object.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_snapshot
"""
response = rds_hook.conn.create_db_snapshot(
DBSnapshotIdentifier="testrdshook-db-instance-snapshot", DBInstanceIdentifier=db_instance_id
)
return response["DBSnapshot"] |
Creates a mock DB cluster snapshot and returns the DBClusterSnapshot dict from the boto response object.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_cluster_snapshot | def db_cluster_snapshot(rds_hook: RdsHook, db_cluster_id: str):
"""
Creates a mock DB cluster snapshot and returns the DBClusterSnapshot dict from the boto response object.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_cluster_snapshot
"""
response = rds_hook.conn.create_db_cluster_snapshot(
DBClusterSnapshotIdentifier="testrdshook-db-cluster-snapshot", DBClusterIdentifier=db_cluster_id
)
return response["DBClusterSnapshot"] |
Creates a mock RDS event subscription and returns its name | def event_subscription_name(rds_hook: RdsHook, db_instance_id: str) -> str:
"""Creates an mock RDS event subscription and returns its name"""
response = rds_hook.conn.create_event_subscription(
SubscriptionName="testrdshook-event-subscription",
SnsTopicArn="test",
SourceType="db-instance",
SourceIds=[db_instance_id],
Enabled=True,
)
return response["EventSubscription"]["CustSubscriptionId"] |
Verify the outcome when the unify_bucket_name_and_key and provide_bucket_name
decorators are combined.
The one case (at least in this test) where the order makes a difference is when
user provides a full s3 key, and also has a connection with a bucket defined,
and does not provide a bucket in the method call. In this case, if we unify
first, then we (desirably) get the bucket from the key. If we provide bucket first,
something undesirable happens. The bucket from the connection is used, which means
we don't respect the full key provided. Further, the full key is not made relative,
which would cause the actual request to fail. For this reason we want to put unify
first. | def test_unify_and_provide_bucket_name_combination(
mock_base, key_kind, has_conn, has_bucket, precedence, expected, caplog
):
"""
Verify the outcome when the unify_bucket_name_and_key and provide_bucket_name
decorators are combined.
The one case (at least in this test) where the order makes a difference is when
user provides a full s3 key, and also has a connection with a bucket defined,
and does not provide a bucket in the method call. In this case, if we unify
first, then we (desirably) get the bucket from the key. If we provide bucket first,
something undesirable happens. The bucket from the connection is used, which means
we don't respect the full key provided. Further, the full key is not made relative,
which would cause the actual request to fail. For this reason we want to put unify
first.
"""
if has_conn == "with_conn":
c = Connection(extra={"service_config": {"s3": {"bucket_name": "conn_bucket"}}})
else:
c = Connection()
key = "key.txt" if key_kind == "rel_key" else "s3://key_bucket/key.txt"
if has_bucket == "with_bucket":
kwargs = {"bucket_name": "kwargs_bucket", "key": key}
else:
kwargs = {"key": key}
mock_base.return_value = c
if precedence == "unify": # unify to be processed before provide
class MyHook(S3Hook):
@unify_bucket_name_and_key
@provide_bucket_name
def do_something(self, bucket_name=None, key=None):
return bucket_name, key
else:
with caplog.at_level("WARNING"):
class MyHook(S3Hook):
@provide_bucket_name
@unify_bucket_name_and_key
def do_something(self, bucket_name=None, key=None):
return bucket_name, key
assert caplog.records[0].message == "`unify_bucket_name_and_key` should wrap `provide_bucket_name`."
hook = MyHook()
assert list(hook.do_something(**kwargs)) == expected |
If we unify first, then we (desirably) get the bucket from the key. If we provide bucket first,
something undesirable happens. The bucket from the connection is used, which means
we don't respect the full key provided. Further, the full key is not made relative,
which would cause the actual request to fail. For this reason we want to put unify
first. | def test_unify_and_provide_ordered_properly():
"""
If we unify first, then we (desirably) get the bucket from the key. If we provide bucket first,
something undesirable happens. The bucket from the connection is used, which means
we don't respect the full key provided. Further, the full key is not made relative,
which would cause the actual request to fail. For this reason we want to put unify
first.
"""
code = inspect.getsource(S3Hook)
matches = re.findall(r"@provide_bucket_name\s+@unify_bucket_name_and_key", code, re.MULTILINE)
if matches:
pytest.fail("@unify_bucket_name_and_key should be applied before @provide_bucket_name in S3Hook") |
Helper to create a mock operator class with extra links | def link_test_operator(*links):
"""Helper for create mock operator class with extra links"""
class LinkTestOperator(MockOperator):
operator_extra_links = tuple(c() for c in links)
return LinkTestOperator |
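# A short usage sketch for the helper above (the link class is hypothetical and
# the BaseOperatorLink import path assumes a recent Airflow 2.x layout).
from airflow.models.baseoperatorlink import BaseOperatorLink


class ExampleLink(BaseOperatorLink):
    """Dummy extra link used only for illustration."""

    name = "Example"

    def get_link(self, operator, *, ti_key):
        return "https://example.com"


ExampleLinkOperator = link_test_operator(ExampleLink)
op = ExampleLinkOperator(task_id="with_links")
assert [link.name for link in op.operator_extra_links] == ["Example"]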