response | instruction |
---|---|
Tests that the 'fail_task' method of the OpenLineageAdapter is invoked with the correct arguments.
This test ensures that the job name is accurately retrieved and included, along with the generated
run_id and task metadata. By mocking the job name retrieval and the run_id generation,
the test verifies the integrity and consistency of the data passed to the adapter during task
failure events, thus confirming that the adapter's failure handling is functioning as expected. | def test_adapter_fail_task_is_called_with_proper_arguments(mock_get_job_name, mocked_adapter, mock_disabled):
"""Tests that the 'fail_task' method of the OpenLineageAdapter is invoked with the correct arguments.
This test ensures that the job name is accurately retrieved and included, along with the generated
run_id and task metadata. By mocking the job name retrieval and the run_id generation,
the test verifies the integrity and consistency of the data passed to the adapter during task
failure events, thus confirming that the adapter's failure handling is functioning as expected.
"""
def mock_task_id(dag_id, task_id, execution_date, try_number):
return f"{dag_id}.{task_id}.{execution_date}.{try_number}"
listener, task_instance = _create_listener_and_task_instance()
mock_get_job_name.return_value = "job_name"
mocked_adapter.build_task_instance_run_id.side_effect = mock_task_id
mocked_adapter.build_dag_run_id.side_effect = lambda x, y: f"{x}.{y}"
mock_disabled.return_value = False
listener.on_task_instance_failed(None, task_instance, None)
listener.adapter.fail_task.assert_called_once_with(
end_time="2023-01-03T13:01:01",
job_name="job_name",
parent_job_name="dag_id",
parent_run_id="dag_id.dag_run_run_id",
run_id="dag_id.task_id.execution_date.1",
task=listener.extractor_manager.extract_metadata(),
) |
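The helper `_create_listener_and_task_instance()` and the `mock_get_job_name` / `mocked_adapter` / `mock_disabled` parameters (presumably supplied by `mock.patch` decorators and fixtures stripped from this excerpt) are not shown here. A minimal sketch of what the helper might look like, inferred only from the values these tests assert on; every name and literal below is an assumption, not the actual Airflow test code:

from datetime import datetime
from unittest import mock

def _create_listener_and_task_instance():
    # Hypothetical reconstruction: a real listener with mocked internals plus a fake
    # TaskInstance carrying the literal values asserted in the tests above.
    from airflow.providers.openlineage.plugins.listener import OpenLineageListener

    listener = OpenLineageListener()
    listener.extractor_manager = mock.Mock()
    listener.extractor_manager.extract_metadata.return_value = mock.Mock()
    listener.adapter = mock.Mock()

    task_instance = mock.Mock()
    task_instance.dag_id = "dag_id"
    task_instance.task_id = "task_id"
    task_instance.execution_date = "execution_date"
    task_instance.try_number = 1
    task_instance.end_date = datetime(2023, 1, 3, 13, 1, 1)  # isoformat() matches the asserted end_time
    task_instance.dag_run = mock.Mock(dag_id="dag_id", run_id="dag_run_run_id")
    task_instance.task = mock.Mock(dag_id="dag_id", task_id="task_id")
    return listener, task_instance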
Tests that the 'complete_task' method of the OpenLineageAdapter is called with the correct arguments.
It checks that the job name is correctly retrieved and passed,
along with the run_id and task metadata. The test also simulates changes in the try_number
attribute of the task instance, as it would occur in Airflow, to ensure that the run_id is updated
accordingly. This helps confirm the consistency and correctness of the data passed to the adapter
during the task's lifecycle events. | def test_adapter_complete_task_is_called_with_proper_arguments(
mock_get_job_name, mocked_adapter, mock_disabled
):
"""Tests that the 'complete_task' method of the OpenLineageAdapter is called with the correct arguments.
It checks that the job name is correctly retrieved and passed,
along with the run_id and task metadata. The test also simulates changes in the try_number
attribute of the task instance, as it would occur in Airflow, to ensure that the run_id is updated
accordingly. This helps confirm the consistency and correctness of the data passed to the adapter
during the task's lifecycle events.
"""
def mock_task_id(dag_id, task_id, execution_date, try_number):
return f"{dag_id}.{task_id}.{execution_date}.{try_number}"
listener, task_instance = _create_listener_and_task_instance()
mock_get_job_name.return_value = "job_name"
mocked_adapter.build_task_instance_run_id.side_effect = mock_task_id
mocked_adapter.build_dag_run_id.side_effect = lambda x, y: f"{x}.{y}"
mock_disabled.return_value = False
listener.on_task_instance_success(None, task_instance, None)
# This run_id will be different as we did NOT simulate increase of the try_number attribute,
# which happens in Airflow.
listener.adapter.complete_task.assert_called_once_with(
end_time="2023-01-03T13:01:01",
job_name="job_name",
parent_job_name="dag_id",
parent_run_id="dag_id.dag_run_run_id",
run_id="dag_id.task_id.execution_date.0",
task=listener.extractor_manager.extract_metadata(),
)
# Now we simulate the increase of try_number, and the run_id should reflect that change.
listener.adapter.complete_task.reset_mock()
task_instance.try_number += 1
listener.on_task_instance_success(None, task_instance, None)
listener.adapter.complete_task.assert_called_once_with(
end_time="2023-01-03T13:01:01",
job_name="job_name",
parent_job_name="dag_id",
parent_run_id="dag_id.dag_run_run_id",
run_id="dag_id.task_id.execution_date.1",
task=listener.extractor_manager.extract_metadata(),
) |
Tests that the run_id remains constant across different methods of the listener.
It ensures that the run_id generated for starting, failing, and completing a task is consistent,
reflecting the task's identity and execution context. The test also simulates the change in the
try_number attribute, as it would occur in Airflow, to verify that the run_id updates accordingly. | def test_run_id_is_constant_across_all_methods(mocked_adapter):
"""Tests that the run_id remains constant across different methods of the listener.
It ensures that the run_id generated for starting, failing, and completing a task is consistent,
reflecting the task's identity and execution context. The test also simulates the change in the
try_number attribute, as it would occur in Airflow, to verify that the run_id updates accordingly.
"""
def mock_task_id(dag_id, task_id, execution_date, try_number):
return f"{dag_id}.{task_id}.{execution_date}.{try_number}"
listener, task_instance = _create_listener_and_task_instance()
mocked_adapter.build_task_instance_run_id.side_effect = mock_task_id
listener.on_task_instance_running(None, task_instance, None)
expected_run_id = listener.adapter.start_task.call_args.kwargs["run_id"]
assert expected_run_id == "dag_id.task_id.execution_date.1"
listener.on_task_instance_failed(None, task_instance, None)
assert listener.adapter.fail_task.call_args.kwargs["run_id"] == expected_run_id
# This run_id will be different as we did NOT simulate increase of the try_number attribute,
# which happens in Airflow.
listener.on_task_instance_success(None, task_instance, None)
assert listener.adapter.complete_task.call_args.kwargs["run_id"] == "dag_id.task_id.execution_date.0"
# Now we simulate the increase of try_number, and the run_id should reflect that change.
# This is how Airflow works, and that's why we expect the run_id to remain constant across all methods.
task_instance.try_number += 1
listener.on_task_instance_success(None, task_instance, None)
assert listener.adapter.complete_task.call_args.kwargs["run_id"] == expected_run_id |
Tests the OpenLineageListener's response when a task instance is in the running state.
This test ensures that when an Airflow task instance transitions to the running state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance. | def test_running_task_correctly_calls_openlineage_adapter_run_id_method():
"""Tests the OpenLineageListener's response when a task instance is in the running state.
This test ensures that when an Airflow task instance transitions to the running state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = _create_listener_and_task_instance()
listener.on_task_instance_running(None, task_instance, None)
listener.adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
execution_date="execution_date",
try_number=1,
) |
Tests the OpenLineageListener's response when a task instance is in the failed state.
This test ensures that when an Airflow task instance transitions to the failed state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance. | def test_failed_task_correctly_calls_openlineage_adapter_run_id_method(mock_adapter):
"""Tests the OpenLineageListener's response when a task instance is in the failed state.
This test ensures that when an Airflow task instance transitions to the failed state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = _create_listener_and_task_instance()
listener.on_task_instance_failed(None, task_instance, None)
mock_adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
execution_date="execution_date",
try_number=1,
) |
Tests the OpenLineageListener's response when a task instance is in the success state.
This test ensures that when an Airflow task instance transitions to the success state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance. | def test_successful_task_correctly_calls_openlineage_adapter_run_id_method(mock_adapter):
"""Tests the OpenLineageListener's response when a task instance is in the success state.
This test ensures that when an Airflow task instance transitions to the success state,
the OpenLineageAdapter's `build_task_instance_run_id` method is called exactly once with the correct
parameters derived from the task instance.
"""
listener, task_instance = _create_listener_and_task_instance()
listener.on_task_instance_success(None, task_instance, None)
mock_adapter.build_task_instance_run_id.assert_called_once_with(
dag_id="dag_id",
task_id="task_id",
execution_date="execution_date",
try_number=0,
) |
Validates the listener's on-failure method is called before try_number increment happens.
This test ensures that when a task instance fails, Airflow's listener method for
task failure (`on_task_instance_failed`) is invoked before the increment of the
`try_number` attribute happens. A custom exception simulates task failure, and the test
captures the `try_number` at the moment of this method call. | def test_listener_on_task_instance_failed_is_called_before_try_number_increment(mock_listener):
"""Validates the listener's on-failure method is called before try_number increment happens.
This test ensures that when a task instance fails, Airflow's listener method for
task failure (`on_task_instance_failed`) is invoked before the increment of the
`try_number` attribute happens. A custom exception simulates task failure, and the test
captures the `try_number` at the moment of this method call.
"""
captured_try_numbers = {}
_setup_mock_listener(mock_listener, captured_try_numbers)
# Just to make sure no error interferes with the test, and we do not suppress it by accident
class CustomError(Exception):
pass
def fail_callable(**kwargs):
raise CustomError("Simulated task failure")
_, task_instance = _create_test_dag_and_task(fail_callable, "failure")
# try_number before execution
assert task_instance.try_number == 1
with suppress(CustomError):
task_instance.run()
# try_number at the moment of function being called
assert captured_try_numbers["running"] == 1
assert captured_try_numbers["failed"] == 1
# try_number after task has been executed
assert task_instance.try_number == 2 |
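`_setup_mock_listener` and `_create_test_dag_and_task` are defined elsewhere in the test module. A rough sketch of what the first helper might do, purely to illustrate how the try_number values end up in `captured_try_numbers`; everything here is an assumption rather than the real implementation:

def _setup_mock_listener(mock_listener, captured_try_numbers):
    # Record task_instance.try_number at the exact moment each listener hook fires.
    def capture(event):
        def side_effect(*args, **kwargs):
            ti = kwargs.get("task_instance") or args[1]
            captured_try_numbers[event] = ti.try_number
        return side_effect

    mock_listener.return_value.on_task_instance_running.side_effect = capture("running")
    mock_listener.return_value.on_task_instance_failed.side_effect = capture("failed")
    mock_listener.return_value.on_task_instance_success.side_effect = capture("success")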
Validates the listener's on-success method is called after the try_number increment happens.
This test ensures that when a task instance successfully completes, the
`on_task_instance_success` method of Airflow's listener is called with an
incremented `try_number` compared to the `try_number` before execution.
The test simulates a successful task execution and captures the `try_number` at the method call. | def test_listener_on_task_instance_success_is_called_after_try_number_increment(mock_listener):
"""Validates the listener's on-success method is called before try_number increment happens.
This test ensures that when a task instance successfully completes, the
`on_task_instance_success` method of Airflow's listener is called with an
incremented `try_number` compared to the `try_number` before execution.
The test simulates a successful task execution and captures the `try_number` at the method call.
"""
captured_try_numbers = {}
_setup_mock_listener(mock_listener, captured_try_numbers)
def success_callable(**kwargs):
return None
_, task_instance = _create_test_dag_and_task(success_callable, "success")
# try_number before execution
assert task_instance.try_number == 1
task_instance.run()
# try_number at the moment of function being called
assert captured_try_numbers["running"] == 1
assert captured_try_numbers["success"] == 2
# try_number after task has been executed
assert task_instance.try_number == 2 |
Fixture to provide a dummy Airflow DAG for testing. | def dummy_dag():
"""Fixture to provide a dummy Airflow DAG for testing."""
return DAG(dag_id="test_dag", start_date=datetime(2023, 9, 29)) |
Test that the PostgreSQL operator can template the same fields as SQLExecuteQueryOperator | def test_parameters_are_templatized(create_task_instance_of_operator):
"""Test that the PostgreSQL operator can template the same fields as SQLExecuteQueryOperator"""
ti = create_task_instance_of_operator(
PostgresOperator,
postgres_conn_id="{{ param.conn_id }}",
sql="SELECT * FROM {{ param.table }} WHERE spam = %(spam)s;",
parameters={"spam": "{{ param.bar }}"},
dag_id="test-postgres-op-parameters-are-templatized",
task_id="test-task",
)
task: PostgresOperator = ti.render_templates({"param": {"conn_id": "pg", "table": "foo", "bar": "egg"}})
assert task.conn_id == "pg"
assert task.sql == "SELECT * FROM foo WHERE spam = %(spam)s;"
assert task.parameters == {"spam": "egg"} |
Create test connections. | def slack_api_connections():
"""Create test connections."""
connections = [
Connection(
conn_id=SLACK_API_DEFAULT_CONN_ID,
conn_type=CONN_TYPE,
password=MOCK_SLACK_API_TOKEN,
),
Connection(
conn_id="compat_http_type",
conn_type="http",
password=MOCK_SLACK_API_TOKEN,
),
Connection(
conn_id="empty_slack_connection",
conn_type=CONN_TYPE,
),
]
with pytest.MonkeyPatch.context() as mp:
for conn in connections:
mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.get_uri())
yield |
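For context, Airflow resolves connections from `AIRFLOW_CONN_<CONN_ID>` environment variables, which is why this fixture only needs to export URIs. Inside the fixture's scope a test could therefore verify the wiring with something like the following hedged illustration (the test name is invented; `SLACK_API_DEFAULT_CONN_ID` and `MOCK_SLACK_API_TOKEN` are the module constants referenced above):

from airflow.hooks.base import BaseHook

def test_connection_is_resolved_from_env():
    # The connection exported by the fixture is visible through the normal lookup path.
    conn = BaseHook.get_connection(SLACK_API_DEFAULT_CONN_ID)
    assert conn.password == MOCK_SLACK_API_TOKEN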
Create test connections. | def slack_webhook_connections():
"""Create test connections."""
connections = [
Connection(
conn_id=SlackWebhookHook.default_conn_name,
conn_type=CONN_TYPE,
password=TEST_TOKEN,
),
Connection(
conn_id="conn_full_url_connection",
conn_type=CONN_TYPE,
password=TEST_WEBHOOK_URL,
),
Connection(
conn_id="conn_full_url_connection_with_host",
conn_type=CONN_TYPE,
host="http://example.org/hooks/",
password=TEST_WEBHOOK_URL,
),
Connection(
conn_id="conn_host_with_schema",
conn_type=CONN_TYPE,
host="https://hooks.slack.com/services/",
password=f"/{TEST_TOKEN}",
),
Connection(
conn_id="conn_host_without_schema",
conn_type=CONN_TYPE,
host="hooks.slack.com/services/",
password=f"/{TEST_TOKEN}",
),
Connection(
conn_id="conn_parts",
conn_type=CONN_TYPE,
host="hooks.slack.com/services",
schema="https",
password=f"/{TEST_TOKEN}",
),
Connection(
conn_id="conn_deprecated_extra",
conn_type=CONN_TYPE,
host="https://hooks.slack.com/services/",
extra={"webhook_token": TEST_TOKEN},
),
Connection(
conn_id="conn_custom_endpoint_1",
conn_type=CONN_TYPE,
schema=TEST_CUSTOM_SCHEMA,
host=TEST_CUSTOM_ENDPOINT,
password=TEST_TOKEN,
),
Connection(
conn_id="conn_custom_endpoint_2",
conn_type=CONN_TYPE,
host=f"{TEST_CUSTOM_SCHEMA}://{TEST_CUSTOM_ENDPOINT}",
password=TEST_TOKEN,
),
Connection(
conn_id="conn_custom_endpoint_3",
conn_type=CONN_TYPE,
password=TEST_CUSTOM_WEBHOOK_URL,
),
Connection(
conn_id="conn_empty",
conn_type=CONN_TYPE,
),
Connection(
conn_id="conn_password_empty_1",
conn_type=CONN_TYPE,
host="https://hooks.slack.com/services/",
),
Connection(
conn_id="conn_password_empty_2",
conn_type=CONN_TYPE,
schema="http",
host="some.netloc",
),
# Not supported anymore
Connection(conn_id="conn_token_in_host_1", conn_type=CONN_TYPE, host=TEST_WEBHOOK_URL),
Connection(
conn_id="conn_token_in_host_2",
conn_type=CONN_TYPE,
schema="https",
host="hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
),
]
with pytest.MonkeyPatch.context() as mp:
for conn in connections:
mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.get_uri())
yield |
Create mock response for success state | def create_successful_response_mock(content):
"""Create mock response for success state"""
response = mock.MagicMock()
response.json.return_value = content
response.status_code = 200
return response |
Create a mock response used as a POST side effect | def create_post_side_effect(status_code=429):
"""Create a mock response used as a POST side effect"""
response = mock.MagicMock()
response.status_code = status_code
response.reason = "test"
response.raise_for_status.side_effect = requests.exceptions.HTTPError(response=response)
return response |
Test the execute function in the case where the SQL query was successful. | def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
"""
Test the execute function in the case where the SQL query was successful.
"""
with patch("airflow.providers.common.sql.operators.sql.BaseSQLOperator.get_db_hook") as get_db_hook_mock:
op = SnowflakeOperator(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
dbapi_hook = MagicMock()
get_db_hook_mock.return_value = dbapi_hook
dbapi_hook.run.return_value = hook_results
dbapi_hook.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
dbapi_hook.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
) |
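The `pytest.mark.parametrize` decorator that supplies `sql`, `return_last`, `split_statement`, `hook_results`, `hook_descriptions` and `expected_results` has been stripped from this excerpt. One plausible case, written only to show how the arguments relate to each other (all values are illustrative assumptions, not the real parametrization):

import pytest

single_statement_case = pytest.param(
    "SELECT * FROM test_table",   # sql
    True,                         # return_last
    False,                        # split_statement
    [(1, "Alice"), (2, "Bob")],   # hook_results: rows returned by the mocked hook
    [[("id",), ("name",)]],       # hook_descriptions: one cursor description per statement
    [(1, "Alice"), (2, "Bob")],   # expected_results: what execute() should return
    id="single-statement-return-last",
)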
Fixture to create a WeaviateHook instance for testing. | def weaviate_hook():
"""
Fixture to create a WeaviateHook instance for testing.
"""
mock_conn = Mock()
# Patch the WeaviateHook get_connection method to return the mock connection
with mock.patch.object(WeaviateHook, "get_connection", return_value=mock_conn):
hook = WeaviateHook(conn_id=TEST_CONN_ID)
return hook |
Test the create_class method of WeaviateHook. | def test_create_class(weaviate_hook):
"""
Test the create_class method of WeaviateHook.
"""
# Mock the Weaviate Client
mock_client = MagicMock()
weaviate_hook.get_conn = MagicMock(return_value=mock_client)
# Define test class JSON
test_class_json = {
"class": "TestClass",
"description": "Test class for unit testing",
}
# Test the create_class method
weaviate_hook.create_class(test_class_json)
# Assert that the create_class method was called with the correct arguments
mock_client.schema.create_class.assert_called_once_with(test_class_json) |
Test the create_schema method of WeaviateHook. | def test_create_schema(weaviate_hook):
"""
Test the create_schema method of WeaviateHook.
"""
# Mock the Weaviate Client
mock_client = MagicMock()
weaviate_hook.get_conn = MagicMock(return_value=mock_client)
# Define test schema JSON
test_schema_json = {
"classes": [
{
"class": "TestClass",
"description": "Test class for unit testing",
}
]
}
# Test the create_schema method
weaviate_hook.create_schema(test_schema_json)
# Assert that the create_schema method was called with the correct arguments
mock_client.schema.create.assert_called_once_with(test_schema_json) |
Test the batch_data method of WeaviateHook. | def test_batch_data(data, expected_length, weaviate_hook):
"""
Test the batch_data method of WeaviateHook.
"""
# Mock the Weaviate Client
mock_client = MagicMock()
weaviate_hook.get_conn = MagicMock(return_value=mock_client)
# Define test data
test_class_name = "TestClass"
# Test the batch_data method
weaviate_hook.batch_data(test_class_name, data)
# Assert that the batch_data method was called with the correct arguments
mock_client.batch.configure.assert_called_once()
mock_batch_context = mock_client.batch.__enter__.return_value
assert mock_batch_context.add_data_object.call_count == expected_length |
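As above, the parametrization providing `data` and `expected_length` is not shown. Plausible cases (assumed, not the actual test data) would exercise both a plain list of objects and a pandas DataFrame with the same rows, each expected to produce one `add_data_object` call per row:

import pandas as pd

batch_data_cases = [
    ([{"name": "chandler"}, {"name": "joey"}, {"name": "ross"}], 3),
    (pd.DataFrame.from_dict({"name": ["chandler", "joey", "ross"]}), 3),
]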
Test to ensure retrying works as expected | def test_batch_data_retry(get_conn, weaviate_hook):
"""Test to ensure retrying works as expected"""
data = [{"name": "chandler"}, {"name": "joey"}, {"name": "ross"}]
response = requests.Response()
response.status_code = 429
error = requests.exceptions.HTTPError()
error.response = response
side_effect = [None, error, None, error, None]
get_conn.return_value.batch.__enter__.return_value.add_data_object.side_effect = side_effect
weaviate_hook.batch_data("TestClass", data)
assert get_conn.return_value.batch.__enter__.return_value.add_data_object.call_count == len(side_effect) |
Test the case where schema_json is a path to a JSON file | def test_upsert_schema_json_file_param(get_schema, create_schema, load, open, weaviate_hook):
"""Test the case where schema_json is a path to a JSON file"""
get_schema.return_value = {"classes": [{"class": "A"}, {"class": "B"}]}
load.return_value = {
"B": {"class": "B"},
"C": {"class": "C"},
}
weaviate_hook.create_or_replace_classes(schema_json="/tmp/some_temp_file.json", existing="ignore")
create_schema.assert_called_once_with({"classes": [{"class": "C"}]}) |
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0:   task_a_0 >> task_b_0
                         |
                         |
dag_1:                   ---> task_a_1 >> task_b_1
                                              |
                                              |
dag_2:                                        ---> task_a_2 >> task_b_2
                                                                   |
                                                                   |
dag_3:                                                             ---> task_a_3 >> task_b_3 | def dag_bag_ext():
"""
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0:   task_a_0 >> task_b_0
                         |
                         |
dag_1:                   ---> task_a_1 >> task_b_1
                                              |
                                              |
dag_2:                                        ---> task_a_2 >> task_b_2
                                                                   |
                                                                   |
dag_3:                                                             ---> task_a_3 >> task_b_3
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule=None)
task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0
)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule=None)
task_a_1 = ExternalTaskSensor(
task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1
)
task_b_1 = ExternalTaskMarker(
task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1
)
task_a_1 >> task_b_1
dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule=None)
task_a_2 = ExternalTaskSensor(
task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2
)
task_b_2 = ExternalTaskMarker(
task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2
)
task_a_2 >> task_b_2
dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule=None)
task_a_3 = ExternalTaskSensor(
task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3
)
task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3)
task_a_3 >> task_b_3
for dag in [dag_0, dag_1, dag_2, dag_3]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs() |
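`DEV_NULL`, `DEFAULT_DATE` and the imports used by these fixtures are module-level definitions not included in this excerpt. Typical definitions, stated here only as an assumption to make the fixtures readable, would look roughly like:

from datetime import datetime, timedelta

DEV_NULL = "/dev/null"               # a dag_folder that contains no DAG files
DEFAULT_DATE = datetime(2015, 1, 1)  # the execution_date used throughout these tests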
Create a DagBag with two DAGs looking like this. task_1 of child_dag_1 on day 1 depends on
task_0 of parent_dag_0 on day 1. Therefore, when task_0 of parent_dag_0 on day 1 and day 2
are cleared, parent_dag_0 DagRuns need to be set to running on both days, but child_dag_1
only needs to be set to running on day 1.
                day 1   day 2
parent_dag_0    task_0  task_0
                  |
                  |
                  v
child_dag_1     task_1  task_1 | def dag_bag_parent_child():
"""
Create a DagBag with two DAGs looking like this. task_1 of child_dag_1 on day 1 depends on
task_0 of parent_dag_0 on day 1. Therefore, when task_0 of parent_dag_0 on day 1 and day 2
are cleared, parent_dag_0 DagRuns need to be set to running on both days, but child_dag_1
only needs to be set to running on day 1.
                day 1   day 2
parent_dag_0    task_0  task_0
                  |
                  |
                  v
child_dag_1     task_1  task_1
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
day_1 = DEFAULT_DATE
with DAG("parent_dag_0", start_date=day_1, schedule=None) as dag_0:
task_0 = ExternalTaskMarker(
task_id="task_0",
external_dag_id="child_dag_1",
external_task_id="task_1",
execution_date=day_1.isoformat(),
recursion_depth=3,
)
with DAG("child_dag_1", start_date=day_1, schedule=None) as dag_1:
ExternalTaskSensor(
task_id="task_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_0.task_id,
execution_date_fn=lambda logical_date: day_1 if logical_date == day_1 else [],
mode="reschedule",
)
for dag in [dag_0, dag_1]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs() |
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id. | def run_tasks(dag_bag, execution_date=DEFAULT_DATE, session=None):
"""
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id.
"""
tis = {}
for dag in dag_bag.dags.values():
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=execution_date,
start_date=execution_date,
run_type=DagRunType.MANUAL,
session=session,
)
# we use sorting by task_id here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
tasks = sorted(dagrun.task_instances, key=lambda ti: ti.task_id)
for ti in tasks:
ti.refresh_from_task(dag.get_task(ti.task_id))
tis[ti.task_id] = ti
ti.run(session=session)
session.flush()
session.merge(ti)
assert_ti_state_equal(ti, State.SUCCESS)
return tis |
Assert state of task_instances equals the given state. | def assert_ti_state_equal(task_instance, state):
"""
Assert state of task_instances equals the given state.
"""
task_instance.refresh_from_db()
assert task_instance.state == state |
Clear the task and its downstream tasks recursively for the dag in the given dagbag. | def clear_tasks(
dag_bag,
dag,
task,
session,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
dry_run=False,
):
"""
Clear the task and its downstream tasks recursively for the dag in the given dagbag.
"""
partial: DAG = dag.partial_subset(task_ids_or_regex=[task.task_id], include_downstream=True)
return partial.clear(
start_date=start_date,
end_date=end_date,
dag_bag=dag_bag,
dry_run=dry_run,
session=session,
) |
Test clearing tasks across DAGs. | def test_external_task_marker_transitive(dag_bag_ext):
"""
Test clearing tasks across DAGs.
"""
tis = run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
clear_tasks(dag_bag_ext, dag_0, task_a_0)
ti_a_0 = tis["task_a_0"]
ti_b_3 = tis["task_b_3"]
assert_ti_state_equal(ti_a_0, State.NONE)
assert_ti_state_equal(ti_b_3, State.NONE) |
Test clearing tasks across DAGs and make sure the right DagRuns are activated. | def test_external_task_marker_clear_activate(dag_bag_parent_child, session):
"""
Test clearing tasks across DAGs and make sure the right DagRuns are activated.
"""
dag_bag = dag_bag_parent_child
day_1 = DEFAULT_DATE
day_2 = DEFAULT_DATE + timedelta(days=1)
run_tasks(dag_bag, execution_date=day_1)
run_tasks(dag_bag, execution_date=day_2)
# Assert that dagruns of all the affected dags are set to SUCCESS before tasks are cleared.
for dag, execution_date in itertools.product(dag_bag.dags.values(), [day_1, day_2]):
dagrun = dag.get_dagrun(execution_date=execution_date, session=session)
dagrun.set_state(State.SUCCESS)
session.flush()
dag_0 = dag_bag.get_dag("parent_dag_0")
task_0 = dag_0.get_task("task_0")
clear_tasks(dag_bag, dag_0, task_0, start_date=day_1, end_date=day_2, session=session)
# Assert that dagruns of all the affected dags are set to QUEUED after tasks are cleared.
# Unaffected dagruns should be left as SUCCESS.
dagrun_0_1 = dag_bag.get_dag("parent_dag_0").get_dagrun(execution_date=day_1, session=session)
dagrun_0_2 = dag_bag.get_dag("parent_dag_0").get_dagrun(execution_date=day_2, session=session)
dagrun_1_1 = dag_bag.get_dag("child_dag_1").get_dagrun(execution_date=day_1, session=session)
dagrun_1_2 = dag_bag.get_dag("child_dag_1").get_dagrun(execution_date=day_2, session=session)
assert dagrun_0_1.state == State.QUEUED
assert dagrun_0_2.state == State.QUEUED
assert dagrun_1_1.state == State.QUEUED
assert dagrun_1_2.state == State.SUCCESS |
Test clearing tasks with no end_date. This is the case when users clear tasks with
Future, Downstream and Recursive selected. | def test_external_task_marker_future(dag_bag_ext):
"""
Test clearing tasks with no end_date. This is the case when users clear tasks with
Future, Downstream and Recursive selected.
"""
date_0 = DEFAULT_DATE
date_1 = DEFAULT_DATE + timedelta(days=1)
tis_date_0 = run_tasks(dag_bag_ext, execution_date=date_0)
tis_date_1 = run_tasks(dag_bag_ext, execution_date=date_1)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
# This should clear all tasks on dag_0 to dag_3 on both date_0 and date_1
clear_tasks(dag_bag_ext, dag_0, task_a_0, end_date=None)
ti_a_0_date_0 = tis_date_0["task_a_0"]
ti_b_3_date_0 = tis_date_0["task_b_3"]
ti_b_3_date_1 = tis_date_1["task_b_3"]
assert_ti_state_equal(ti_a_0_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_1, State.NONE) |
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared. | def test_external_task_marker_exception(dag_bag_ext):
"""
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
"""
run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
task_b_0 = dag_0.get_task("task_b_0")
task_b_0.recursion_depth = 2
with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
clear_tasks(dag_bag_ext, dag_0, task_a_0) |
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0:   task_a_0 >> task_b_0
              ^          |
              |          |
dag_1:        |          ---> task_a_1 >> task_b_1
              |                               ^
              |                               |
dag_n:        |                               ---> task_a_n >> task_b_n
              |                                                    |
              ----------------------------------------------------- | def dag_bag_cyclic():
"""
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0:   task_a_0 >> task_b_0
              ^          |
              |          |
dag_1:        |          ---> task_a_1 >> task_b_1
              |                               ^
              |                               |
dag_n:        |                               ---> task_a_n >> task_b_n
              |                                                    |
              -----------------------------------------------------
"""
def _factory(depth: int) -> DagBag:
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dags = []
with DAG("dag_0", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a_0 = EmptyOperator(task_id="task_a_0")
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3
)
task_a_0 >> task_b_0
for n in range(1, depth):
with DAG(f"dag_{n}", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{n}",
external_dag_id=f"dag_{n-1}",
external_task_id=f"task_b_{n-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{n}",
external_dag_id=f"dag_{n+1}",
external_task_id=f"task_a_{n+1}",
recursion_depth=3,
)
task_a >> task_b
# Create the last dag which loops back
with DAG(f"dag_{depth}", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{depth}",
external_dag_id=f"dag_{depth-1}",
external_task_id=f"task_b_{depth-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{depth}",
external_dag_id="dag_0",
external_task_id="task_a_0",
recursion_depth=2,
)
task_a >> task_b
for dag in dags:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
return _factory |
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised. | def test_external_task_marker_cyclic_deep(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised.
"""
dag_bag = dag_bag_cyclic(10)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
clear_tasks(dag_bag, dag_0, task_a_0) |
Tests clearing across multiple DAGs that have cyclic dependencies shallower
than recursion_depth | def test_external_task_marker_cyclic_shallow(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies shallower
than recursion_depth
"""
dag_bag = dag_bag_cyclic(2)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
tis = clear_tasks(dag_bag, dag_0, task_a_0, dry_run=True)
assert [
("dag_0", "task_a_0"),
("dag_0", "task_b_0"),
("dag_1", "task_a_1"),
("dag_1", "task_b_1"),
("dag_2", "task_a_2"),
("dag_2", "task_b_2"),
] == sorted((ti.dag_id, ti.task_id) for ti in tis) |
Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker. | def dag_bag_multiple():
"""
Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker.
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule="@daily")
agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule="@daily")
dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag)
dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag)
daily_task = EmptyOperator(task_id="daily_tas", dag=daily_dag)
begin = EmptyOperator(task_id="begin", dag=agg_dag)
for i in range(8):
task = ExternalTaskMarker(
task_id=f"{daily_task.task_id}_{i}",
external_dag_id=daily_dag.dag_id,
external_task_id=daily_task.task_id,
execution_date=f"{{{{ macros.ds_add(ds, -1 * {i}) }}}}",
dag=agg_dag,
)
begin >> task
return dag_bag |
Test clearing a dag that has multiple ExternalTaskMarker. | def test_clear_multiple_external_task_marker(dag_bag_multiple):
"""
Test clearing a dag that has multiple ExternalTaskMarker.
"""
agg_dag = dag_bag_multiple.get_dag("agg_dag")
tis = run_tasks(dag_bag_multiple, execution_date=DEFAULT_DATE)
session = settings.Session()
try:
qry = session.query(TaskInstance).filter(
TaskInstance.state == State.NONE, TaskInstance.dag_id.in_(dag_bag_multiple.dag_ids)
)
assert agg_dag.clear(dag_bag=dag_bag_multiple) == len(tis) == qry.count() == 10
finally:
session.close() |
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501     20200502                 20200510
+------+     +------+                 +------+
| head |    -->head |    -->     -->  | head |
|  |   |   / |  |   |   /   /   /     |  |   |
|  v   |  /  |  v   |  /   /   /      |  v   |
| body | /   | body | /   ...   /     | body |
|  |   |/    |  |   |/         /      |  |   |
|  v  /      |  v  /          /       |  v   |
| tail/|     | tail/|        /        | tail |
+------+     +------+                 +------+ | def dag_bag_head_tail():
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501     20200502                 20200510
+------+     +------+                 +------+
| head |    -->head |    -->     -->  | head |
|  |   |   / |  |   |   /   /   /     |  |   |
|  v   |  /  |  v   |  /   /   /      |  v   |
| body | /   | body | /   ...   /     | body |
|  |   |/    |  |   |/         /      |  |   |
|  v  /      |  v  /          /       |  v   |
| tail/|     | tail/|        /        | tail |
+------+     +------+                 +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule="@daily") as dag:
head = ExternalTaskSensor(
task_id="head",
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = EmptyOperator(task_id="body")
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
execution_date="{{ macros.ds_add(ds, 1) }}",
)
head >> body >> tail
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag |
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501     20200502                 20200510
+------+     +------+                 +------+
| head |    -->head |    -->     -->  | head |
|  |   |   / |  |   |   /   /   /     |  |   |
|  v   |  /  |  v   |  /   /   /      |  v   |
| body | /   | body | /   ...   /     | body |
|  |   |/    |  |   |/         /      |  |   |
|  v  /      |  v  /          /       |  v   |
| tail/|     | tail/|        /        | tail |
+------+     +------+                 +------+ | def dag_bag_head_tail_mapped_tasks():
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501     20200502                 20200510
+------+     +------+                 +------+
| head |    -->head |    -->     -->  | head |
|  |   |   / |  |   |   /   /   /     |  |   |
|  v   |  /  |  v   |  /   /   /      |  v   |
| body | /   | body | /   ...   /     | body |
|  |   |/    |  |   |/         /      |  |   |
|  v  /      |  v  /          /       |  v   |
| tail/|     | tail/|        /        | tail |
+------+     +------+                 +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule="@daily") as dag:
@task_deco
def dummy_task(x: int):
return x
head = ExternalTaskSensor(
task_id="head",
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = dummy_task.expand(x=range(5))
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
execution_date="{{ macros.ds_add(ds, 1) }}",
)
head >> body >> tail
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag |
Loads DAGs from a module for testing. | def make_example_dags(module_path):
"""Loads DAGs from a module for testing."""
dagbag = DagBag(module_path)
return dagbag.dags |
Make very simple DAG to verify serialization result. | def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id="simple_dag",
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id="custom_task")
BashOperator(
task_id="bash_task",
bash_command="echo {{ task.task_id }}",
owner="airflow",
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {"simple_dag": dag} |
Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization. | def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {"start_date": datetime(2019, 7, 10)}
dag = DAG(
"user_defined_macro_filter_dag",
default_args=default_args,
user_defined_macros={
"next_execution_date": compute_next_execution_date,
},
user_defined_filters={"hello": lambda name: f"Hello {name}"},
catchup=False,
)
BashOperator(
task_id="echo",
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag} |
Collects DAGs to test. | def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags", # TODO: Remove once AIP-47 is completed
"airflow/providers/*/*/example_dags", # TODO: Remove once AIP-47 is completed
"tests/system/providers/*/",
"tests/system/providers/*/*/",
]
excluded_patterns = [f"{ROOT_FOLDER}/{excluded_pattern}" for excluded_pattern in get_excluded_patterns()]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
if any([directory.startswith(excluded_pattern) for excluded_pattern in excluded_patterns]):
continue
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags |
Create a simple_dag variant that uses timetable instead of schedule_interval. | def get_timetable_based_simple_dag(timetable):
"""Create a simple_dag variant that uses timetable instead of schedule_interval."""
dag = collect_dags(["airflow/example_dags"])["simple_dag"]
dag.timetable = timetable
dag.schedule_interval = timetable.summary
return dag |
Serialize DAGs to JSON in a subprocess and push the results onto the queue. | def serialize_subprocess(queue, dag_folder):
"""Serialize DAGs to JSON in a subprocess and push the results onto the queue."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None) |
Patch plugins manager to always and only return our custom timetable. | def timetable_plugin(monkeypatch):
"""Patch plugins manager to always and only return our custom timetable."""
from airflow import plugins_manager
monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
monkeypatch.setattr(
plugins_manager,
"timetable_classes",
{"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
) |
Serialisation / deserialisation continues to work without kubernetes installed | def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition(".")[0] == "kubernetes":
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch("builtins.__import__", side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.providers.cncf.kubernetes.*?
imported_airflow = {
c.args[0].split(".", 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
"__type": "k8s.V1Pod",
"__var": PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"]) |
Unmapping a deserialized mapped operator should be similar to deserializing a non-mapped operator. | def test_operator_expand_deserialized_unmap():
"""Unmapping a deserialized mapped operator should be similar to deserializing a non-mapped operator."""
normal = BashOperator(task_id="a", bash_command=[1, 2], executor_config={"a": "b"})
mapped = BashOperator.partial(task_id="a", executor_config={"a": "b"}).expand(bash_command=[1, 2])
ser_mapped = BaseSerialization.serialize(mapped)
deser_mapped = BaseSerialization.deserialize(ser_mapped)
ser_normal = BaseSerialization.serialize(normal)
deser_normal = BaseSerialization.deserialize(ser_normal)
assert deser_mapped.unmap(None) == deser_normal |
Unmapping a deserialized mapped sensor should be similar to deserializing a non-mapped sensor | def test_sensor_expand_deserialized_unmap():
"""Unmapping a deserialized mapped sensor should be similar to deserializing a non-mapped sensor"""
normal = BashSensor(task_id="a", bash_command=[1, 2], mode="reschedule")
mapped = BashSensor.partial(task_id="a", mode="reschedule").expand(bash_command=[1, 2])
serialize = SerializedBaseOperator.serialize
deserialize = SerializedBaseOperator.deserialize
assert deserialize(serialize(mapped)).unmap(None) == deserialize(serialize(normal)) |
Test task resources serialization/deserialization. | def test_task_resources_serde():
"""
Test task resources serialization/deserialization.
"""
from airflow.operators.empty import EmptyOperator
execution_date = datetime(2020, 1, 1)
task_id = "task1"
with DAG("test_task_resources", start_date=execution_date) as _:
task = EmptyOperator(task_id=task_id, resources={"cpus": 0.1, "ram": 2048})
serialized = BaseSerialization.serialize(task)
assert serialized["__var"]["resources"] == {
"cpus": {"name": "CPU", "qty": 0.1, "units_str": "core(s)"},
"disk": {"name": "Disk", "qty": 512, "units_str": "MB"},
"gpus": {"name": "GPU", "qty": 0, "units_str": "gpu(s)"},
"ram": {"name": "RAM", "qty": 2048, "units_str": "MB"},
} |
Any time we recurse cls.serialize, we must forward all kwargs. | def test_recursive_serialize_calls_must_forward_kwargs():
"""Any time we recurse cls.serialize, we must forward all kwargs."""
import ast
valid_recursive_call_count = 0
skipped_recursive_calls = 0 # when another serialize method called
file = REPO_ROOT / "airflow/serialization/serialized_objects.py"
content = file.read_text()
tree = ast.parse(content)
class_def = None
for stmt in ast.walk(tree):
if isinstance(stmt, ast.ClassDef) and stmt.name == "BaseSerialization":
class_def = stmt
method_def = None
for elem in ast.walk(class_def):
if isinstance(elem, ast.FunctionDef) and elem.name == "serialize":
method_def = elem
break
kwonly_args = [x.arg for x in method_def.args.kwonlyargs]
for elem in ast.walk(method_def):
if isinstance(elem, ast.Call) and getattr(elem.func, "attr", "") == "serialize":
if not elem.func.value.id == "cls":
skipped_recursive_calls += 1
break
kwargs = {y.arg: y.value for y in elem.keywords}
for name in kwonly_args:
if name not in kwargs or getattr(kwargs[name], "id", "") != name:
ref = f"{file}:{elem.lineno}"
message = (
f"Error at {ref}; recursive calls to `cls.serialize` "
f"must forward the `{name}` argument"
)
raise Exception(message)
valid_recursive_call_count += 1
print(f"validated calls: {valid_recursive_call_count}")
assert valid_recursive_call_count > 0
assert skipped_recursive_calls == 1 |
If strict=True, serialization should fail when object is not JSON serializable. | def test_strict_mode():
"""If strict=True, serialization should fail when object is not JSON serializable."""
class Test:
a = 1
from airflow.serialization.serialized_objects import BaseSerialization
obj = [[[Test()]]] # nested to verify recursive behavior
BaseSerialization.serialize(obj) # does not raise
with pytest.raises(SerializationError, match="Encountered unexpected type"):
BaseSerialization.serialize(obj, strict=True) |
Test deserializing a connection that was serialised by a previous serializer implementation. | def test_backcompat_deserialize_connection(conn_uri):
"""Test deserializing a connection that was serialised by a previous serializer implementation."""
from airflow.serialization.serialized_objects import BaseSerialization
conn_obj = {Encoding.TYPE: DAT.CONNECTION, Encoding.VAR: {"conn_id": "TEST_ID", "uri": conn_uri}}
deserialized = BaseSerialization.deserialize(conn_obj)
assert deserialized.get_uri() == conn_uri |
If use_pydantic_models=True the objects should be serialized to Pydantic objects. | def test_serialize_deserialize_pydantic(input, pydantic_class, encoded_type, cmp_func):
"""If use_pydantic_models=True the objects should be serialized to Pydantic objects."""
pydantic = pytest.importorskip("pydantic", minversion="2.0.0")
from airflow.serialization.serialized_objects import BaseSerialization
with warnings.catch_warnings():
warnings.simplefilter("error", category=pydantic.warnings.PydanticDeprecationWarning)
serialized = BaseSerialization.serialize(input, use_pydantic_models=True) # does not raise
# Verify the result is JSON-serializable
json.dumps(serialized) # does not raise
assert serialized["__type"] == encoded_type
assert serialized["__var"] is not None
deserialized = BaseSerialization.deserialize(serialized, use_pydantic_models=True)
assert isinstance(deserialized, pydantic_class)
assert cmp_func(input, deserialized)
# verify that when we round trip a pydantic model we get the same thing
reserialized = BaseSerialization.serialize(deserialized, use_pydantic_models=True)
dereserialized = BaseSerialization.deserialize(reserialized, use_pydantic_models=True)
assert isinstance(dereserialized, pydantic_class)
assert dereserialized == deserialized
# Verify recursive behavior
obj = [[input]]
BaseSerialization.serialize(obj, use_pydantic_models=True) |
This is for AIP-44 when we need to send certain non-error exceptions
as part of an RPC call e.g. TaskDeferred or AirflowRescheduleException. | def test_roundtrip_exceptions():
"""This is for AIP-44 when we need to send certain non-error exceptions
as part of an RPC call e.g. TaskDeferred or AirflowRescheduleException."""
some_date = pendulum.now()
resched_exc = AirflowRescheduleException(reschedule_date=some_date)
ser = BaseSerialization.serialize(resched_exc)
deser = BaseSerialization.deserialize(ser)
assert isinstance(deser, AirflowRescheduleException)
assert deser.reschedule_date == some_date
del ser
del deser
exc = TaskDeferred(
trigger=MyTrigger(hi="yo"),
method_name="meth_name",
kwargs={"have": "pie"},
timeout=timedelta(seconds=30),
)
ser = BaseSerialization.serialize(exc)
deser = BaseSerialization.deserialize(ser)
assert deser.trigger.hi == "yo"
assert deser.method_name == "meth_name"
assert deser.kwargs == {"have": "pie"}
assert deser.timeout == timedelta(seconds=30) |
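`MyTrigger` is defined elsewhere in the test module; presumably it is a minimal `BaseTrigger` subclass whose constructor kwargs survive the round trip, roughly along these lines (the classpath and details below are assumptions):

from airflow.triggers.base import BaseTrigger, TriggerEvent

class MyTrigger(BaseTrigger):
    def __init__(self, hi):
        super().__init__()
        self.hi = hi

    def serialize(self):
        # The classpath is illustrative; the real test would return its own module path.
        return "tests.serialization.test_serialized_objects.MyTrigger", {"hi": self.hi}

    async def run(self):
        yield TriggerEvent(self.hi)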
Override this fixture in provider's conftest.py | def provider_env_vars():
"""Override this fixture in provider's conftest.py"""
return () |
Add @pytest.mark.system(provider_name) for every system test. | def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None:
"""Add @pytest.mark.system(provider_name) for every system test."""
rootdir = config.rootpath
for item in items:
rel_path = item.path.relative_to(rootdir)
# Provider system tests
match = re.match(".+/system/providers/([^/]+)", str(rel_path))
if match:
provider = match.group(1)
item.add_marker(pytest.mark.system(provider))
# Core system tests
match = re.match(".+/system/[^/]+", str(rel_path))
if match:
item.add_marker(pytest.mark.system("core")) |
Creates a flow that takes a CSV and converts it to JSON containing the same data | def create_s3_to_s3_flow(flow_name: str, bucket_name: str, source_folder: str):
"""Creates a flow that takes a CSV and converts it to JSON containing the same data"""
client = boto3.client("appflow")
client.create_flow(
flowName=flow_name,
triggerConfig={"triggerType": "OnDemand"},
sourceFlowConfig={
"connectorType": "S3",
"sourceConnectorProperties": {
"S3": {
"bucketName": bucket_name,
"bucketPrefix": source_folder,
"s3InputFormatConfig": {"s3InputFileType": "CSV"},
},
},
},
destinationFlowConfigList=[
{
"connectorType": "S3",
"destinationConnectorProperties": {
"S3": {
"bucketName": bucket_name,
"s3OutputFormatConfig": {
"fileType": "JSON",
"aggregationConfig": {
"aggregationType": "None",
},
},
}
},
},
],
tasks=[
{
"sourceFields": ["col1", "col2"],
"connectorOperator": {"S3": "PROJECTION"},
"taskType": "Filter",
},
{
"sourceFields": ["col1"],
"connectorOperator": {"S3": "NO_OP"},
"destinationField": "col1",
"taskType": "Map",
"taskProperties": {"DESTINATION_DATA_TYPE": "string", "SOURCE_DATA_TYPE": "string"},
},
{
"sourceFields": ["col2"],
"connectorOperator": {"S3": "NO_OP"},
"destinationField": "col2",
"taskType": "Map",
"taskProperties": {"DESTINATION_DATA_TYPE": "string", "SOURCE_DATA_TYPE": "string"},
},
],
) |
Returns the AMI ID of the most recently-created Amazon Linux image | def get_latest_ami_id():
"""Returns the AMI ID of the most recently-created Amazon Linux image"""
# Amazon is retiring AL2 in 2023 and replacing it with Amazon Linux 2022.
# This image prefix should be futureproof, but may need adjusting depending
# on how they name the new images. This page should have AL2022 info when
# it comes available: https://aws.amazon.com/linux/amazon-linux-2022/faqs/
image_prefix = "Amazon Linux*"
root_device_name = "/dev/xvda"
images = boto3.client("ec2").describe_images(
Filters=[
{"Name": "description", "Values": [image_prefix]},
{
"Name": "architecture",
"Values": ["x86_64"],
}, # t3 instances are only compatible with x86 architecture
{
"Name": "root-device-type",
"Values": ["ebs"],
}, # instances which are capable of hibernation need to use an EBS-backed AMI
{"Name": "root-device-name", "Values": [root_device_name]},
],
Owners=["amazon"],
)
# Sort on CreationDate
return max(images["Images"], key=itemgetter("CreationDate"))["ImageId"] |
Creates an ECS cluster. | def create_cluster(cluster_name: str) -> None:
"""Creates an ECS cluster."""
boto3.client("ecs").create_cluster(clusterName=cluster_name) |
Creates a Task Definition. | def register_task_definition(task_name: str, container_name: str) -> str:
"""Creates a Task Definition."""
response = boto3.client("ecs").register_task_definition(
family=task_name,
# CPU and Memory are required for Fargate and are set to the lowest currently allowed values.
cpu="256",
memory="512",
containerDefinitions=[
{
"name": container_name,
"image": "ubuntu",
"workingDirectory": "/usr/bin",
"entryPoint": ["sh", "-c"],
"command": ["ls"],
}
],
requiresCompatibilities=["FARGATE"],
networkMode="awsvpc",
)
return response["taskDefinition"]["taskDefinitionArn"] |
Deletes the Task Definition. | def delete_task_definition(task_definition_arn: str) -> None:
"""Deletes the Task Definition."""
boto3.client("ecs").deregister_task_definition(taskDefinition=task_definition_arn) |
Deletes the ECS cluster. | def delete_cluster(cluster_name: str) -> None:
"""Deletes the ECS cluster."""
boto3.client("ecs").delete_cluster(cluster=cluster_name) |
A DynamoDB table has an ItemCount value, but it is only updated every six hours.
To verify this DAG worked, we will scan the table and count the items manually. | def get_dynamodb_item_count(table_name):
"""
A DynamoDB table has an ItemCount value, but it is only updated every six hours.
To verify this DAG worked, we will scan the table and count the items manually.
"""
table = DynamoDBHook(resource_type="dynamodb").conn.Table(table_name)
response = table.scan(Select="COUNT")
item_count = response["Count"]
while "LastEvaluatedKey" in response:
response = table.scan(Select="COUNT", ExclusiveStartKey=response["LastEvaluatedKey"])
item_count += response["Count"]
print(f"DynamoDB table contains {item_count} items.") |
We need a Docker image with the following requirements:
- Has numpy, pandas, requests, and boto3 installed
- Has our data preprocessing script mounted and set as the entry point | def _build_and_upload_docker_image(preprocess_script, repository_uri):
"""
We need a Docker image with the following requirements:
- Has numpy, pandas, requests, and boto3 installed
- Has our data preprocessing script mounted and set as the entry point
"""
with NamedTemporaryFile(mode="w+t") as preprocessing_script, NamedTemporaryFile(mode="w+t") as dockerfile:
preprocessing_script.write(preprocess_script)
preprocessing_script.flush()
dockerfile.write(
f"""
FROM public.ecr.aws/amazonlinux/amazonlinux
COPY {preprocessing_script.name.split('/')[2]} /preprocessing.py
ADD credentials /credentials
ENV AWS_SHARED_CREDENTIALS_FILE=/credentials
RUN yum install python3 pip -y
RUN pip3 install boto3 pandas requests
CMD [ "python3", "/preprocessing.py"]
"""
)
dockerfile.flush()
ecr_region = repository_uri.split(".")[3]
docker_build_and_push_commands = f"""
cp /root/.aws/credentials /tmp/credentials &&
# login to public ecr repo containing amazonlinux image (public login is always on us east 1)
aws ecr-public get-login-password --region us-east-1 |
docker login --username AWS --password-stdin public.ecr.aws &&
docker build --platform=linux/amd64 -f {dockerfile.name} -t {repository_uri} /tmp &&
rm /tmp/credentials &&
# login again, this time to the private repo we created to hold that specific image
aws ecr get-login-password --region {ecr_region} |
docker login --username AWS --password-stdin {repository_uri} &&
docker push {repository_uri}
"""
logger.info("building and uploading docker image for preprocessing...")
docker_build = subprocess.Popen(
docker_build_and_push_commands,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = docker_build.communicate()
if docker_build.returncode != 0:
raise RuntimeError(
"Failed to prepare docker image for the preprocessing job.\n"
"The following error happened while executing the sequence of bash commands:\n"
f"{stderr.decode()}"
) |
Generates a very simple CSV dataset with headers | def generate_data() -> str:
"""Generates a very simple CSV dataset with headers"""
content = "class,x,y\n" # headers
for i in range(SAMPLE_SIZE):
content += f"{i%100},{i},{SAMPLE_SIZE-i}\n"
return content |
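`SAMPLE_SIZE` is a module-level constant not shown in this excerpt. For illustration only, with an assumed `SAMPLE_SIZE = 3` the returned string would be:

class,x,y
0,0,3
1,1,2
2,2,1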
Checks the CIDR blocks of existing subnets and attempts to extrapolate the next available block. | def _get_next_available_cidr(vpc_id: str) -> str:
"""Checks the CIDR blocks of existing subnets and attempts to extrapolate the next available block."""
error_msg_template = "Can not calculate the next available CIDR block: {}"
vpc_filter = {"Name": "vpc-id", "Values": [vpc_id]}
existing_subnets = boto3.client("ec2").describe_subnets(Filters=[vpc_filter])["Subnets"]
if not existing_subnets:
raise ValueError(error_msg_template.format("No subnets are found on the provided VPC."))
# Pull all CIDR blocks from the JSON response and convert them
# to IPv4Network objects which can be sorted and manipulated.
existing_cidr_blocks = [IPv4Network(subnet["CidrBlock"]) for subnet in existing_subnets]
# Can not predict the next block if existing block sizes (prefixlen) are not consistent.
if len({block.prefixlen for block in existing_cidr_blocks}) > 1:
raise ValueError(error_msg_template.format("Subnets do not all use the same CIDR block size."))
last_used_block = max(existing_cidr_blocks)
*_, last_reserved_ip = last_used_block
return f"{last_reserved_ip + 1}/{last_used_block.prefixlen}" |
Returns the VPC ID of the account's default VPC. | def get_default_vpc_id() -> str:
"""Returns the VPC ID of the account's default VPC."""
filters = [{"Name": "is-default", "Values": ["true"]}]
return boto3.client("ec2").describe_vpcs(Filters=filters)["Vpcs"][0]["VpcId"] |
Allocate a new IP address | def create_address_allocation():
"""Allocate a new IP address"""
return boto3.client("ec2").allocate_address()["AllocationId"] |
Create a NAT gateway | def create_nat_gateway(allocation_id: str, subnet_id: str):
"""Create a NAT gateway"""
client = boto3.client("ec2")
nat_gateway_id = client.create_nat_gateway(
AllocationId=allocation_id,
SubnetId=subnet_id,
ConnectivityType="public",
)["NatGateway"]["NatGatewayId"]
waiter = client.get_waiter("nat_gateway_available")
waiter.wait(NatGatewayIds=[nat_gateway_id])
return nat_gateway_id |
Create a route table for private subnets. | def create_route_table(vpc_id: str, nat_gateway_id: str, test_name: str):
"""Create a route table for private subnets."""
client = boto3.client("ec2")
tags = [{"Key": "Name", "Value": f"Route table for {test_name}"}]
route_table_id = client.create_route_table(
VpcId=vpc_id,
TagSpecifications=[{"ResourceType": "route-table", "Tags": tags}],
)["RouteTable"]["RouteTableId"]
client.create_route(
RouteTableId=route_table_id,
DestinationCidrBlock="0.0.0.0/0",
NatGatewayId=nat_gateway_id,
)
return route_table_id |
Fargate Profiles require two private subnets in two different availability zones.
These subnets also require an egress route to the internet, which is achieved here with a NAT gateway. | def create_private_subnets(
vpc_id: str,
route_table_id: str,
test_name: str,
number_to_make: int = 1,
cidr_block: str | None = None,
):
"""
Fargate Profiles require two private subnets in two different availability zones.
These subnets also require an egress route to the internet, which is achieved here with a NAT gateway.
"""
client = boto3.client("ec2")
subnet_ids = []
tags = [{"Key": "Name", "Value": f"Private Subnet for {test_name}"}]
zone_names = [zone["ZoneName"] for zone in client.describe_availability_zones()["AvailabilityZones"]]
# Create the requested number of subnets.
for counter in range(number_to_make):
new_subnet = client.create_subnet(
VpcId=vpc_id,
CidrBlock=cidr_block or _get_next_available_cidr(vpc_id),
AvailabilityZone=zone_names[counter],
TagSpecifications=[{"ResourceType": "subnet", "Tags": tags}],
)["Subnet"]["SubnetId"]
subnet_ids.append(new_subnet)
# Testing shows that a new subnet takes a very short but measurable
# time to create; wait to prevent a possible race condition.
client.get_waiter("subnet_available").wait(SubnetIds=[new_subnet])
# Associate the new subnets with the black hole route table to make them private.
client.associate_route_table(RouteTableId=route_table_id, SubnetId=new_subnet)
return subnet_ids |
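A minimal sketch of how these networking helpers could be chained to prepare private subnets with internet egress; the ordering is assumed and `public_subnet_id` is a hypothetical existing public subnet that hosts the NAT gateway:

vpc_id = get_default_vpc_id()
allocation_id = create_address_allocation()
nat_gateway_id = create_nat_gateway(allocation_id, public_subnet_id)  # public_subnet_id is hypothetical
route_table_id = create_route_table(vpc_id, nat_gateway_id, test_name="example_test")
subnet_ids = create_private_subnets(vpc_id, route_table_id, test_name="example_test", number_to_make=2)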
Returns an operator that will print the output of a `kubectl describe pod` in the Airflow logs. | def get_describe_pod_operator(cluster_name: str, pod_name: str) -> Operator:
"""Returns an operator that will print the output of a `kubectl describe pod` in the Airflow logs."""
return BashOperator(
task_id="describe_pod",
bash_command=f"""
install_aws.sh;
install_kubectl.sh;
# configure kubectl to hit the right cluster
aws eks update-kubeconfig --name {cluster_name};
# once all this setup is done, actually describe the pod
echo "vvv pod description below vvv";
kubectl describe pod {pod_name};
echo "^^^ pod description above ^^^" """,
) |
Extracts the module name from the test module.
:return: The name of the test module that called the helper method. | def _get_test_name() -> str:
"""
Extracts the module name from the test module.
:return: The name of the test module that called the helper method.
"""
# The exact layer of the stack will depend on if this is called directly
# or from another helper, but the test will always contain the identifier.
test_filename: str = next(
frame.filename
for frame in inspect.stack()
if any(identifier in frame.filename for identifier in TEST_FILE_IDENTIFIERS)
)
return Path(test_filename).stem |
Verifies that a prospective Environment ID value fits requirements.
An Environment ID for an AWS System test must be a lowercase alphanumeric
string which starts with a letter.
:param env_id: An Environment ID to validate.
:return: A validated string cast to lowercase. | def _validate_env_id(env_id: str) -> str:
"""
Verifies that a prospective Environment ID value fits requirements.
An Environment ID for an AWS System test must be a lowercase alphanumeric
string which starts with a letter.
:param env_id: An Environment ID to validate.
:return: A validated string cast to lowercase.
"""
if any(char.isupper() for char in str(env_id)):
print(LOWERCASE_ENV_ID_MSG)
if not env_id.isalnum() or not env_id[0].isalpha():
raise ValueError(INVALID_ENV_ID_MSG)
return env_id.lower() |
Test values are stored in an SSM parameter's Value as a JSON-encoded dict of key/value pairs.
:param key: The key to search for within the returned Parameter Value.
:return: The value of the provided key from SSM | def _fetch_from_ssm(key: str, test_name: str | None = None) -> str:
"""
Test values are stored in an SSM parameter's Value as a JSON-encoded dict of key/value pairs.
:param key: The key to search for within the returned Parameter Value.
:return: The value of the provided key from SSM
"""
_test_name: str = test_name or _get_test_name()
hook = SsmHook(aws_conn_id=None)
value: str = ""
try:
value = json.loads(hook.get_parameter_value(_test_name))[key]
# Since a default value after the SSM check is allowed, these exceptions should not stop execution.
except NoCredentialsError as e:
log.info("No boto credentials found: %s", e)
except ClientError as e:
log.info("Client error when connecting to SSM: %s", e)
except hook.conn.exceptions.ParameterNotFound as e:
log.info("SSM does not contain any parameter for this test: %s", e)
except KeyError as e:
log.info("SSM contains one parameter for this test, but not the requested value: %s", e)
return value |
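A hedged usage sketch, assuming an SSM parameter named after the test module whose Value is a JSON-encoded dict; the parameter name and key below are hypothetical:

# Parameter "example_my_test" with Value: {"ROLE_ARN": "arn:aws:iam::123456789012:role/example"}
role_arn = _fetch_from_ssm("ROLE_ARN", test_name="example_my_test")
# Returns the matching value, or "" if credentials, the parameter, or the key cannot be found.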
Given a Parameter name: first check for an existing Environment Variable,
then check SSM for a value. If neither are available, fall back on the
optional default value.
:param key: The name of the Parameter to fetch a value for.
:param default_value: The default value to use if no value can be found.
:param test_name: The system test name.
:param optional: Whether the variable is optional. If True, does not raise `ValueError` if variable
does not exist
:return: The value of the parameter. | def fetch_variable(
key: str,
default_value: str | None = None,
test_name: str | None = None,
optional: bool = False,
) -> str | None:
"""
Given a Parameter name: first check for an existing Environment Variable,
then check SSM for a value. If neither are available, fall back on the
optional default value.
:param key: The name of the Parameter to fetch a value for.
:param default_value: The default value to use if no value can be found.
:param test_name: The system test name.
:param optional: Whether the variable is optional. If True, does not raise `ValueError` if variable
does not exist
:return: The value of the parameter.
"""
value: str | None = os.getenv(key, _fetch_from_ssm(key, test_name)) or default_value
if not optional and not value:
raise ValueError(NO_VALUE_MSG.format(key=key))
return value |
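A hedged usage sketch showing the resolution order (environment variable, then SSM, then default); the keys below are hypothetical:

os.environ["MY_BUCKET_NAME"] = "bucket-from-env"
fetch_variable("MY_BUCKET_NAME", default_value="fallback-bucket")  # -> "bucket-from-env" (env var wins)
fetch_variable("UNSET_KEY", default_value="fallback-bucket")       # -> "fallback-bucket" (not in env or SSM)
fetch_variable("UNSET_KEY", optional=True)                         # -> None instead of raising ValueError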
Retrieves or generates an Environment ID, validates that it is suitable,
exports it as an Environment Variable, and returns it.
If an Environment ID has already been generated, use that.
Otherwise, try to fetch it and export it as an Environment Variable.
If there is not one available to fetch then generate one and export it as an Environment Variable.
:return: A valid System Test Environment ID. | def set_env_id() -> str:
"""
Retrieves or generates an Environment ID, validates that it is suitable,
exports it as an Environment Variable, and returns it.
If an Environment ID has already been generated, use that.
Otherwise, try to fetch it and export it as an Environment Variable.
If there is not one available to fetch then generate one and export it as an Environment Variable.
:return: A valid System Test Environment ID.
"""
env_id: str = str(fetch_variable(ENV_ID_ENVIRON_KEY, DEFAULT_ENV_ID))
env_id = _validate_env_id(env_id)
os.environ[ENV_ID_ENVIRON_KEY] = env_id
return env_id |
If all tasks in this dagrun have succeeded, then delete the associated logs.
Otherwise, apply a retention policy to the logs. This allows the logs
to be used for troubleshooting but ensures they won't build up indefinitely.
:param logs: A list of log_group/stream_prefix tuples to delete.
:param force_delete: Whether to check log streams within the log group before
removal. If True, removes the log group and all its log streams inside it.
:param retry: Whether to retry if the log group/stream was not found. In some
cases, the log group/stream is created seconds after the main resource has
been created. By default, it retries for 3 times with a 5s waiting period.
:param retry_times: Number of retries.
:param ti: Used to check the status of the tasks. This gets pulled from the
DAG's context and does not need to be passed manually. | def prune_logs(
logs: list[tuple[str, str | None]],
force_delete: bool = False,
retry: bool = False,
retry_times: int = 3,
ti=None,
):
"""
If all tasks in this dagrun have succeeded, then delete the associated logs.
Otherwise, apply a retention policy to the logs. This allows the logs
to be used for troubleshooting but ensures they won't build up indefinitely.
:param logs: A list of log_group/stream_prefix tuples to delete.
:param force_delete: Whether to check log streams within the log group before
removal. If True, removes the log group and all its log streams inside it.
:param retry: Whether to retry if the log group/stream was not found. In some
cases, the log group/stream is created seconds after the main resource has
been created. By default, it retries for 3 times with a 5s waiting period.
:param retry_times: Number of retries.
:param ti: Used to check the status of the tasks. This gets pulled from the
DAG's context and does not need to be passed manually.
"""
if all_tasks_passed(ti):
_purge_logs(logs, force_delete, retry, retry_times)
else:
client: BaseClient = boto3.client("logs")
for group, _ in logs:
client.put_retention_policy(logGroupName=group, retentionInDays=30) |
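`all_tasks_passed` is referenced above but not shown here; one plausible implementation (an assumption, not necessarily the exact helper used by these tests) checks that no task instance in the current dag run has failed:

from airflow.utils.state import State

def all_tasks_passed(ti) -> bool:
    # Treat the run as successful when no task instance in this dag run ended up failed.
    return all(task.state != State.FAILED for task in ti.get_dagrun().get_task_instances())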
Accepts a list of tuples in the format: ('log group name', 'log stream prefix').
For each log group, it will delete any log streams matching the provided
prefix and then, if the log group is empty, delete the group. If the group
is not empty, that indicates there are logs not generated by the test, and
those are left intact. If `force_delete` is True, it will simply delete the log group regardless
of the log streams within that log group.
:param test_logs: A list of log_group/stream_prefix tuples to delete.
:param force_delete: Whether to check log streams within the log group before removal. If True,
removes the log group and all its log streams inside it
:param retry: Whether to retry if the log group/stream was not found. In some cases, the log group/stream
is created seconds after the main resource has been created. By default, it retries for 3 times
with a 5s waiting period
:param retry_times: Number of retries | def _purge_logs(
test_logs: list[tuple[str, str | None]],
force_delete: bool = False,
retry: bool = False,
retry_times: int = 3,
) -> None:
"""
Accepts a list of tuples in the format: ('log group name', 'log stream prefix').
For each log group, it will delete any log streams matching the provided
prefix and then, if the log group is empty, delete the group. If the group
is not empty, that indicates there are logs not generated by the test, and
those are left intact. If `force_delete` is True, it will simply delete the log group regardless
of the log streams within that log group.
:param test_logs: A list of log_group/stream_prefix tuples to delete.
:param force_delete: Whether to check log streams within the log group before removal. If True,
removes the log group and all its log streams inside it
:param retry: Whether to retry if the log group/stream was not found. In some cases, the log group/stream
is created seconds after the main resource has been created. By default, it retries for 3 times
with a 5s waiting period
:param retry_times: Number of retries
"""
client: BaseClient = boto3.client("logs")
for group, prefix in test_logs:
try:
if prefix:
log_streams = client.describe_log_streams(
logGroupName=group,
logStreamNamePrefix=prefix,
)["logStreams"]
for stream_name in [stream["logStreamName"] for stream in log_streams]:
client.delete_log_stream(logGroupName=group, logStreamName=stream_name)
if force_delete or not client.describe_log_streams(logGroupName=group)["logStreams"]:
client.delete_log_group(logGroupName=group)
except ClientError as e:
if not retry or retry_times == 0 or e.response["Error"]["Code"] != "ResourceNotFoundException":
raise e
time.sleep(PURGE_LOGS_INTERVAL_PERIOD)
_purge_logs(
test_logs=test_logs,
force_delete=force_delete,
retry=retry,
retry_times=retry_times - 1,
) |
This task should call the Twitter API and retrieve the tweets sent from and to each of the four Twitter
users (Twitter_A, ..., Twitter_D) during the previous day. The task should generate eight CSV output files, whose naming
convention is direction(from or to)_twitterHandle_date.csv. | def fetch_tweets():
"""
This task should call the Twitter API and retrieve the tweets sent from and to each of the four Twitter
users (Twitter_A, ..., Twitter_D) during the previous day. The task should generate eight CSV output files, whose naming
convention is direction(from or to)_twitterHandle_date.csv.
""" |
This is a placeholder to clean the eight files. In this step you can drop or cherry-pick columns
and different parts of the text. | def clean_tweets():
"""
This is a placeholder to clean the eight files. In this step you can drop or cherry-pick columns
and different parts of the text.
""" |
This is a placeholder to analyze the Twitter data. It could be as simple as a sentiment analysis using an
algorithm like bag of words, or something more complicated. You could also look at web services for such
tasks. | def analyze_tweets():
"""
This is a placeholder to analyze the Twitter data. It could be as simple as a sentiment analysis using an
algorithm like bag of words, or something more complicated. You could also look at web services for such
tasks.
""" |
This is a placeholder to extract summary from Hive data and store it to MySQL. | def transfer_to_db():
"""
This is a placeholder to extract summary from Hive data and store it to MySQL.
""" |
### Example Pyspark DAG
This is an example DAG which uses pyspark | def example_pyspark():
"""
### Example Pyspark DAG
This is an example DAG which uses pyspark
"""
# [START task_pyspark]
@task.pyspark(conn_id="spark-local")
def spark_task(spark: SparkSession, sc: SparkContext) -> pd.DataFrame:
df = spark.createDataFrame(
[
(1, "John Doe", 21),
(2, "Jane Doe", 22),
(3, "Joe Bloggs", 23),
],
["id", "name", "age"],
)
df.show()
return df.toPandas()
# [END task_pyspark]
@task
def print_df(df: pd.DataFrame):
print(df)
df = spark_task()
print_df(df) |
The function that will be executed on failure.
:param context: The context of the executed task. | def failure_callback(context):
"""
The function that will be executed on failure.
:param context: The context of the executed task.
"""
message = f"The task {context['ti'].task_id} failed"
DingdingHook(message_type="text", message=message, at_all=True).send() |
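A hedged example of wiring the callback into a DAG via `default_args`, so any failing task triggers the Dingding message; the DAG id, schedule, and task below are assumptions:

from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG(
    dag_id="example_dingding_failure_callback",
    start_date=datetime(2024, 1, 1),
    schedule=None,
    default_args={"on_failure_callback": failure_callback},
):
    BashOperator(task_id="always_fails", bash_command="exit 1")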
### TaskFlow API Tutorial Documentation
This is a simple data pipeline example which demonstrates the use of
the TaskFlow API using three simple tasks for Extract, Transform, and Load.
Documentation that goes along with the Airflow TaskFlow API tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html) | def tutorial_taskflow_api_docker_virtualenv():
"""
### TaskFlow API Tutorial Documentation
This is a simple data pipeline example which demonstrates the use of
the TaskFlow API using three simple tasks for Extract, Transform, and Load.
Documentation that goes along with the Airflow TaskFlow API tutorial is
located
[here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)
"""
# [START extract_virtualenv]
@task.virtualenv(
use_dill=True,
system_site_packages=False,
requirements=["funcsigs"],
)
def extract():
"""
#### Extract task
A simple Extract task to get data ready for the rest of the data
pipeline. In this case, getting data is simulated by reading from a
hardcoded JSON string.
"""
import json
data_string = '{"1001": 301.27, "1002": 433.21, "1003": 502.22}'
order_data_dict = json.loads(data_string)
return order_data_dict
# [END extract_virtualenv]
# [START transform_docker]
@task.docker(image="python:3.9-slim-bookworm", multiple_outputs=True)
def transform(order_data_dict: dict):
"""
#### Transform task
A simple Transform task which takes in the collection of order data and
computes the total order value.
"""
total_order_value = 0
for value in order_data_dict.values():
total_order_value += value
return {"total_order_value": total_order_value}
# [END transform_docker]
# [START load]
@task()
def load(total_order_value: float):
"""
#### Load task
A simple Load task which takes in the result of the Transform task and
instead of saving it for end user review, just prints it out.
"""
print(f"Total order value is: {total_order_value:.2f}")
# [END load]
# [START main_flow]
order_data = extract()
order_summary = transform(order_data)
load(order_summary["total_order_value"]) |
show_tables queries elasticsearch to list available tables | def show_tables():
"""
show_tables queries elasticsearch to list available tables
"""
# [START howto_elasticsearch_query]
es = ElasticsearchSQLHook(elasticsearch_conn_id=CONN_ID)
# Handle ES conn with context manager
with es.get_conn() as es_conn:
tables = es_conn.execute("SHOW TABLES")
for table, *_ in tables:
print(f"table: {table}")
return True |
Use ElasticSearchPythonHook to print results from a local Elasticsearch | def use_elasticsearch_hook():
"""
Use ElasticSearchPythonHook to print results from a local Elasticsearch
"""
es_hosts = ["http://localhost:9200"]
es_hook = ElasticsearchPythonHook(hosts=es_hosts)
query = {"query": {"match_all": {}}}
result = es_hook.search(query=query)
print(result)
return True |
Returns the spec of the column with the given display name. | def get_target_column_spec(columns_specs: list[dict], column_name: str) -> str:
"""
Returns the spec of the column with the given display name.
"""
for column in columns_specs:
if column["display_name"] == column_name:
return extract_object_id(column)
raise Exception(f"Unknown target column: {column_name}") |
Returns the spec of the column with the given display name. | def get_target_column_spec(columns_specs: list[dict], column_name: str) -> str:
"""
Returns the spec of the column with the given display name.
"""
for column in columns_specs:
if column["display_name"] == column_name:
return extract_object_id(column)
raise Exception(f"Unknown target column: {column_name}") |
Generates an ip configuration for a CloudSQL instance creation body | def ip_configuration() -> dict[str, Any]:
"""Generates an ip configuration for a CloudSQL instance creation body"""
if run_in_composer():
# Use connection to Cloud SQL instance via Private IP within the Cloud Composer's network.
return {
"ipv4Enabled": True,
"requireSsl": False,
"enablePrivatePathForGoogleCloudServices": True,
"privateNetwork": """{{ task_instance.xcom_pull('get_composer_network')}}""",
}
else:
# Use connection to Cloud SQL instance via Public IP from anywhere (mask 0.0.0.0/0).
# Consider specifying your network mask
# for allowing requests only from the trusted sources, not from anywhere.
return {
"ipv4Enabled": True,
"requireSsl": False,
"authorizedNetworks": [
{"value": "0.0.0.0/0"},
],
} |
Generates a CloudSQL instance creation body | def cloud_sql_instance_create_body(database_provider: dict[str, Any]) -> dict[str, Any]:
"""Generates a CloudSQL instance creation body"""
create_body: dict[str, Any] = deepcopy(CLOUD_SQL_INSTANCE_CREATE_BODY_TEMPLATE)
create_body["name"] = database_provider["cloud_sql_instance_name"]
create_body["databaseVersion"] = database_provider["database_version"]
create_body["settings"]["ipConfiguration"] = ip_configuration()
return create_body |
Generates a CloudSQL database creation body | def cloud_sql_database_create_body(instance: str) -> dict[str, Any]:
"""Generates a CloudSQL database creation body"""
return {
"instance": instance,
"name": CLOUD_SQL_DATABASE_NAME,
"project": PROJECT_ID,
} |
Generates an ip configuration for a CloudSQL instance creation body | def ip_configuration() -> dict[str, Any]:
"""Generates an ip configuration for a CloudSQL instance creation body"""
if run_in_composer():
# Use connection to Cloud SQL instance via Private IP within the Cloud Composer's network.
return {
"ipv4Enabled": True,
"requireSsl": False,
"sslMode": "ENCRYPTED_ONLY",
"enablePrivatePathForGoogleCloudServices": True,
"privateNetwork": """{{ task_instance.xcom_pull('get_composer_network')}}""",
}
else:
# Use connection to Cloud SQL instance via Public IP from anywhere (mask 0.0.0.0/0).
# Consider specifying your network mask
# for allowing requests only from the trusted sources, not from anywhere.
return {
"ipv4Enabled": True,
"requireSsl": False,
"sslMode": "ENCRYPTED_ONLY",
"authorizedNetworks": [
{"value": "0.0.0.0/0"},
],
} |
Generates a CloudSQL instance creation body | def cloud_sql_instance_create_body(database_provider: dict[str, Any]) -> dict[str, Any]:
"""Generates a CloudSQL instance creation body"""
create_body: dict[str, Any] = deepcopy(CLOUD_SQL_INSTANCE_CREATE_BODY_TEMPLATE)
create_body["name"] = database_provider["cloud_sql_instance_name"]
create_body["databaseVersion"] = database_provider["database_version"]
create_body["settings"]["ipConfiguration"] = ip_configuration()
return create_body |
Generates a CloudSQL database creation body | def cloud_sql_database_create_body(instance: str) -> dict[str, Any]:
"""Generates a CloudSQL database creation body"""
return {
"instance": instance,
"name": CLOUD_SQL_DATABASE_NAME,
"project": PROJECT_ID,
} |
DebugExecutor changes the sensor mode from poke to reschedule. Some sensors don't work correctly
in reschedule mode. They are decorated with the `poke_mode_only` decorator to fail when the mode is changed.
This method creates a dummy property to overwrite it and forces the poke method to always return True. | def workaround_in_debug_executor(cls):
"""
DebugExecutor changes the sensor mode from poke to reschedule. Some sensors don't work correctly
in reschedule mode. They are decorated with the `poke_mode_only` decorator to fail when the mode is changed.
This method creates a dummy property to overwrite it and forces the poke method to always return True.
"""
cls.mode = dummy_mode_property()
cls.poke = lambda self, context: True |
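`dummy_mode_property` is not shown here; one minimal implementation (an assumption) is a plain read/write property, so assigning "reschedule" to `mode` is accepted without changing behavior:

def dummy_mode_property():
    def mode_getter(self):
        return self._mode
    def mode_setter(self, value):
        self._mode = value
    return property(mode_getter, mode_setter)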
A callable to upload file from GCS to AWS bucket | def upload_file():
"""A callable to upload file from GCS to AWS bucket"""
gcs_hook = GCSHook()
s3_hook = S3Hook()
with gcs_hook.provide_file(bucket_name=RESOURCES_BUCKET_NAME, object_name=UPLOAD_FILE) as gcs_file:
s3_hook.load_file_obj(file_obj=gcs_file, key=UPLOAD_FILE, bucket_name=BUCKET_NAME) |
Remove invalid characters for filename | def safe_name(s: str) -> str:
"""
Remove invalid characters for filename
"""
return re.sub("[^0-9a-zA-Z_]+", "_", s) |
Take the raw `requests.Response` object and check for a cursor.
If a cursor exists, this function creates and returns parameters to call
the next page of results. | def get_next_page_cursor(response) -> dict | None:
"""
Take the raw `requests.Response` object and check for a cursor.
If a cursor exists, this function creates and returns parameters to call
the next page of results.
"""
next_cursor = response.json().get("cursor")
if next_cursor:
return dict(data={"cursor": next_cursor})
return None |
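A hedged usage sketch, assuming an `HttpOperator` whose `pagination_function` parameter (available in newer versions of the HTTP provider) is called with each response and should return extra request kwargs for the next call, or None to stop:

from airflow.providers.http.operators.http import HttpOperator

get_all_items = HttpOperator(
    task_id="get_all_items",
    http_conn_id="http_default",
    endpoint="/items",
    method="GET",
    pagination_function=get_next_page_cursor,
)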
Create a fileshare with directory | def create_fileshare():
"""Create a fileshare with directory"""
hook = AzureFileShareHook()
hook.create_share(NAME)
hook.create_directory(share_name=NAME, directory_name=DIRECTORY)
exists = hook.check_for_directory(share_name=NAME, directory_name=DIRECTORY)
if not exists:
raise Exception |