response | instruction |
---|---|
Gives tests the name of the class being tested. | def gen_test_name(trigger):
"""Gives tests the name of the class being tested."""
return trigger.__class__.__name__ |
Assembles the list of tuples which will be used to validate test results.
The format of the tuple is (attribute name, expected value)
:param inputs: A class containing lists of tuples to use for verifying the output
of cluster or nodegroup creation tests.
:param cluster_name: The name of the cluster under test.
:param fargate_profile_name: The name of the Fargate profile under test if applicable.
:param nodegroup_name: The name of the nodegroup under test if applicable.
:return: Returns a list of tuples containing the keys and values to be validated in testing. | def attributes_to_test(
inputs: InputTypes,
cluster_name: str,
fargate_profile_name: str | None = None,
nodegroup_name: str | None = None,
) -> list[tuple]:
"""
Assembles the list of tuples which will be used to validate test results.
The format of the tuple is (attribute name, expected value)
:param inputs: A class containing lists of tuples to use for verifying the output
of cluster or nodegroup creation tests.
:param cluster_name: The name of the cluster under test.
:param fargate_profile_name: The name of the Fargate profile under test if applicable.
:param nodegroup_name: The name of the nodegroup under test if applicable.
:return: Returns a list of tuples containing the keys and values to be validated in testing.
"""
result: list[tuple] = deepcopy(inputs.REQUIRED + inputs.OPTIONAL + [STATUS]) # type: ignore
if inputs == ClusterInputs:
result += [(ClusterAttributes.NAME, cluster_name)]
elif inputs == FargateProfileInputs:
result += [(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name)]
elif inputs == NodegroupInputs:
# The below tag is mandatory and must have a value of either 'owned' or 'shared'
# A value of 'owned' denotes that the subnets are exclusive to the nodegroup.
# The 'shared' value allows more than one resource to use the subnet.
required_tag: dict = {f"kubernetes.io/cluster/{cluster_name}": "owned"}
# Find the user-submitted tag set and append the required tag to it.
final_tag_set: dict = required_tag
for key, value in result:
if key == "tags":
final_tag_set = {**value, **final_tag_set}
# Inject it back into the list.
result = [
(key, value) if (key != NodegroupAttributes.TAGS) else (NodegroupAttributes.TAGS, final_tag_set)
for key, value in result
]
result += [(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name)]
return result |
Generates a number of EKS Clusters with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param num_clusters: Number of clusters to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated clusters. | def generate_clusters(eks_hook: EksHook, num_clusters: int, minimal: bool) -> list[str]:
"""
Generates a number of EKS Clusters with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param num_clusters: Number of clusters to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated clusters.
"""
    # Generates N clusters named cluster0, cluster1, ..., cluster(N-1)
return [
eks_hook.create_cluster(name=f"cluster{count}", **_input_builder(ClusterInputs, minimal))[
ResponseAttributes.CLUSTER
][ClusterAttributes.NAME]
for count in range(num_clusters)
] |
Generates a number of EKS Fargate profiles with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param cluster_name: The name of the EKS Cluster to attach the Fargate profiles to.
:param num_profiles: Number of Fargate profiles to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated Fargate profiles. | def generate_fargate_profiles(
eks_hook: EksHook, cluster_name: str, num_profiles: int, minimal: bool
) -> list[str]:
"""
Generates a number of EKS Fargate profiles with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param cluster_name: The name of the EKS Cluster to attach the Fargate profiles to.
:param num_profiles: Number of Fargate profiles to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated Fargate profiles.
"""
    # Generates N Fargate profiles named profile0, profile1, ..., profile(N-1)
return [
eks_hook.create_fargate_profile(
fargateProfileName=f"profile{count}",
clusterName=cluster_name,
**_input_builder(FargateProfileInputs, minimal),
)[ResponseAttributes.FARGATE_PROFILE][FargateProfileAttributes.FARGATE_PROFILE_NAME]
for count in range(num_profiles)
] |
Generates a number of EKS Managed Nodegroups with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param cluster_name: The name of the EKS Cluster to attach the nodegroups to.
:param num_nodegroups: Number of nodegroups to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated nodegroups. | def generate_nodegroups(
eks_hook: EksHook, cluster_name: str, num_nodegroups: int, minimal: bool
) -> list[str]:
"""
Generates a number of EKS Managed Nodegroups with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:param cluster_name: The name of the EKS Cluster to attach the nodegroups to.
:param num_nodegroups: Number of nodegroups to generate.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a list of the names of the generated nodegroups.
"""
    # Generates N nodegroups named nodegroup0, nodegroup1, ..., nodegroup(N-1)
return [
eks_hook.create_nodegroup(
nodegroupName=f"nodegroup{count}",
clusterName=cluster_name,
**_input_builder(NodegroupInputs, minimal),
)[ResponseAttributes.NODEGROUP][NodegroupAttributes.NODEGROUP_NAME]
for count in range(num_nodegroups)
] |
Returns True if the provided region and partition are a valid pair.
:param region: AWS region code to test.
:param partition: AWS partition code to test.
:return: Returns True if the provided region and partition are a valid pair. | def region_matches_partition(region: str, partition: str) -> bool:
"""
Returns True if the provided region and partition are a valid pair.
:param region: AWS region code to test.
:param partition: AWS partition code to test.
:return: Returns True if the provided region and partition are a valid pair.
"""
valid_matches: list[tuple[str, str]] = [
("cn-", "aws-cn"),
("us-gov-", "aws-us-gov"),
("us-gov-iso-", "aws-iso"),
("us-gov-iso-b-", "aws-iso-b"),
]
for prefix, expected_partition in valid_matches:
if region.startswith(prefix):
return partition == expected_partition
return partition == "aws" |
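For reference, a quick usage sketch of the helper above (the region and partition strings are illustrative examples, not from the source):

assert region_matches_partition("cn-north-1", "aws-cn") is True  # "cn-" prefix requires the "aws-cn" partition
assert region_matches_partition("us-gov-east-1", "aws") is False  # GovCloud regions require "aws-us-gov"
assert region_matches_partition("eu-west-1", "aws") is True  # regions without a special prefix fall back to "aws"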
Assembles the inputs which will be used to generate the test object into a dictionary.
:param options: A class containing lists of tuples to use to create
the cluster or nodegroup used in testing.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a dict containing the keys and values to be validated in testing. | def _input_builder(options: InputTypes, minimal: bool) -> dict:
"""
Assembles the inputs which will be used to generate the test object into a dictionary.
:param options: A class containing lists of tuples to use to create
the cluster or nodegroup used in testing.
:param minimal: If True, only the required values are generated; if False all values are generated.
:return: Returns a dict containing the keys and values to be validated in testing.
"""
values: list[tuple] = deepcopy(options.REQUIRED) # type: ignore
if not minimal:
values.extend(deepcopy(options.OPTIONAL))
return dict(values) |
Converts a string template into a regex template for pattern matching.
:param value: The template string to convert.
:returns: Returns a regex pattern | def string_to_regex(value: str) -> Pattern[str]:
"""
Converts a string template into a regex template for pattern matching.
:param value: The template string to convert.
:returns: Returns a regex pattern
"""
return re.compile(re.sub(r"[{](.*?)[}]", r"(?P<\1>.+)", value)) |
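A brief illustration of the conversion above; the ARN-style template is an illustrative example, not taken from the source:

pattern = string_to_regex("arn:aws:eks:{region}:{account}:cluster/{name}")
match = pattern.match("arn:aws:eks:us-east-1:123456789012:cluster/demo")
assert match is not None
assert match.groupdict() == {"region": "us-east-1", "account": "123456789012", "name": "demo"}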
API Input and Output keys are formatted differently. The EKS Hooks map
as closely as possible to the API calls, which use camelCase variable
names, but the Operators match python conventions and use snake_case.
This method converts the keys of a dict which are in snake_case (input
format) to camelCase (output format) while leaving the dict values unchanged.
:param original: Dict which needs the keys converted.
:type original: Dict | def convert_keys(original: dict) -> dict:
"""
API Input and Output keys are formatted differently. The EKS Hooks map
as closely as possible to the API calls, which use camelCase variable
names, but the Operators match python conventions and use snake_case.
This method converts the keys of a dict which are in snake_case (input
format) to camelCase (output format) while leaving the dict values unchanged.
:param original: Dict which needs the keys converted.
:type original: Dict
"""
if "nodegroup_name" in original.keys():
conversion_map = {
"cluster_name": "clusterName",
"cluster_role_arn": "roleArn",
"nodegroup_subnets": "subnets",
"subnets": "subnets",
"nodegroup_name": "nodegroupName",
"nodegroup_role_arn": "nodeRole",
}
elif "fargate_profile_name" in original.keys():
conversion_map = {
"cluster_name": "clusterName",
"fargate_profile_name": "fargateProfileName",
"subnets": "subnets",
# The following are "duplicated" because we used the more verbose/descriptive version
# in the CreateCluster Operator when creating a cluster alongside a Fargate profile, but
# the more terse version in the CreateFargateProfile Operator for the sake of convenience.
"pod_execution_role_arn": "podExecutionRoleArn",
"fargate_pod_execution_role_arn": "podExecutionRoleArn",
"selectors": "selectors",
"fargate_selectors": "selectors",
}
else:
conversion_map = {
"cluster_name": "name",
"cluster_role_arn": "roleArn",
"resources_vpc_config": "resourcesVpcConfig",
}
return {conversion_map[k] if k in conversion_map else k: v for (k, v) in deepcopy(original).items()} |
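A small sketch of the conversion for the cluster case (the argument values are placeholders):

snake_case_args = {
    "cluster_name": "demo-cluster",
    "cluster_role_arn": "arn:aws:iam::123456789012:role/demo",
    "resources_vpc_config": {"subnetIds": ["subnet-12345"]},
}
assert convert_keys(snake_case_args) == {
    "name": "demo-cluster",
    "roleArn": "arn:aws:iam::123456789012:role/demo",
    "resourcesVpcConfig": {"subnetIds": ["subnet-12345"]},
}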
There does not appear to be a straight-forward way to assert the type of waiter.
Instead, get the class name and check if it contains the expected name.
:param waiter: A mocked Boto3 Waiter object.
:param expected: The expected class name of the Waiter object, for example "ClusterActive". | def assert_expected_waiter_type(waiter: mock.MagicMock, expected: str):
"""
There does not appear to be a straight-forward way to assert the type of waiter.
Instead, get the class name and check if it contains the expected name.
:param waiter: A mocked Boto3 Waiter object.
:param expected: The expected class name of the Waiter object, for example "ClusterActive".
"""
assert expected in str(type(waiter.call_args.args[0])) |
no_op A function that returns its arguments
:return: whatever was passed in
:rtype: Any | def _no_op(*args, **kwargs) -> Any:
"""no_op A function that returns its arguments
:return: whatever was passed in
:rtype: Any
"""
return args, kwargs |
simple_producer A function that returns the key,value passed
in for production via "KafkaProducerOperator"
:param key: the key for the message
:param value: the value for the message
:return: The Key / Value pair for production via the operator
:rtype: List[Tuple[Any, Any]] | def _simple_producer(key, value) -> list[tuple[Any, Any]]:
"""simple_producer A function that returns the key,value passed
in for production via "KafkaProducerOperator"
:param key: the key for the message
:param value: the value for the message
:return: The Key / Value pair for production via the operator
:rtype: List[Tuple[Any, Any]]
"""
return [(key, value)] |
Get the value after `sentinel` in an `iterable` | def get_after(sentinel, iterable):
"""Get the value after `sentinel` in an `iterable`"""
truncated = itertools.dropwhile(lambda el: el != sentinel, iterable)
next(truncated)
return next(truncated) |
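For example, with the implementation above:

assert get_after("b", ["a", "b", "c"]) == "c"  # returns the element that follows the sentinel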
Register the same signals as scheduler does to test celery_executor to make sure it does not
hang. | def register_signals():
"""
Register the same signals as scheduler does to test celery_executor to make sure it does not
hang.
"""
orig_sigint = orig_sigterm = orig_sigusr2 = signal.SIG_DFL
orig_sigint = signal.signal(signal.SIGINT, _exit_gracefully)
orig_sigterm = signal.signal(signal.SIGTERM, _exit_gracefully)
orig_sigusr2 = signal.signal(signal.SIGUSR2, _exit_gracefully)
yield
# Restore original signal handlers after test
signal.signal(signal.SIGINT, orig_sigint)
signal.signal(signal.SIGTERM, orig_sigterm)
signal.signal(signal.SIGUSR2, orig_sigusr2) |
Test that celery_executor does not hang after many runs. | def test_send_tasks_to_celery_hang(register_signals):
"""
Test that celery_executor does not hang after many runs.
"""
executor = celery_executor.CeleryExecutor()
task = MockTask()
task_tuples_to_send = [(None, None, None, task) for _ in range(26)]
for _ in range(250):
# This loop can hang on Linux if celery_executor does something wrong with
# multiprocessing.
results = executor._send_tasks_to_celery(task_tuples_to_send)
assert results == [(None, None, 1) for _ in task_tuples_to_send] |
Helper fixture for obtaining a data file from the data directory. | def data_file():
"""Helper fixture for obtaining a data file from the data directory."""
if not DATA_FILE_DIRECTORY.exists():
msg = f"Data Directory {DATA_FILE_DIRECTORY.as_posix()!r} does not exist."
raise FileNotFoundError(msg)
elif not DATA_FILE_DIRECTORY.is_dir():
msg = f"Data Directory {DATA_FILE_DIRECTORY.as_posix()!r} expected to be a directory."
raise NotADirectoryError(msg)
def wrapper(filepath: str | Path) -> Path:
return DATA_FILE_DIRECTORY.joinpath(filepath).resolve(strict=True)
return wrapper |
Test that k8s_pod_yaml is rendered correctly, stored in the Database,
and is correctly fetched using RTIF.get_k8s_pod_yaml | def test_get_k8s_pod_yaml(render_k8s_pod_yaml, redact, dag_maker):
"""
Test that k8s_pod_yaml is rendered correctly, stored in the Database,
and is correctly fetched using RTIF.get_k8s_pod_yaml
"""
with dag_maker("test_get_k8s_pod_yaml") as dag:
task = BashOperator(task_id="test", bash_command="echo hi")
dr = dag_maker.create_dagrun()
dag.fileloc = TEST_DAGS_FOLDER + "/test_get_k8s_pod_yaml.py"
ti = dr.task_instances[0]
ti.task = task
render_k8s_pod_yaml.return_value = {"I'm a": "pod"}
rtif = RTIF(ti=ti)
assert ti.dag_id == rtif.dag_id
assert ti.task_id == rtif.task_id
assert ti.run_id == rtif.run_id
expected_pod_yaml = {"I'm a": "pod"}
assert rtif.k8s_pod_yaml == render_k8s_pod_yaml.return_value
# K8s pod spec dict was passed to redact
redact.assert_any_call(rtif.k8s_pod_yaml)
with create_session() as session:
session.add(rtif)
session.flush()
assert expected_pod_yaml == RTIF.get_k8s_pod_yaml(ti=ti, session=session)
make_transient(ti)
# "Delete" it from the DB
session.rollback()
# Test the else part of get_k8s_pod_yaml
# i.e. for the TIs that are not stored in RTIF table
# Fetching them will return None
assert RTIF.get_k8s_pod_yaml(ti=ti, session=session) is None |
Emit pods with given phases sequentially.
`statuses_to_emit` should be a list of bools indicating running or not. | def get_read_pod_mock_containers(statuses_to_emit=None):
"""
Emit pods with given phases sequentially.
`statuses_to_emit` should be a list of bools indicating running or not.
"""
async def mock_read_namespaced_pod(*args, **kwargs):
container_mock = MagicMock()
container_mock.state.running = statuses_to_emit.pop(0)
event_mock = MagicMock()
event_mock.status.container_statuses = [container_mock]
return event_mock
return mock_read_namespaced_pod |
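A short usage sketch of the mock factory above (the status list is illustrative):

import asyncio

read_pod = get_read_pod_mock_containers([True, False])
event = asyncio.run(read_pod())
assert event.status.container_statuses[0].state.running is True  # the first call pops the first status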
The `container_is_running` method is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This function
emits params used in `test_container_is_running` to verify this behavior.
We create mock classes not derived from MagicMock because with an instance `e` of MagicMock,
tests like `e.hello is not None` are always True. | def params_for_test_container_is_running():
"""The `container_is_running` method is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This function
emits params used in `test_container_is_running` to verify this behavior.
We create mock classes not derived from MagicMock because with an instance `e` of MagicMock,
tests like `e.hello is not None` are always True.
"""
class RemotePodMock:
pass
class ContainerStatusMock:
def __init__(self, name):
self.name = name
def remote_pod(running=None, not_running=None):
e = RemotePodMock()
e.status = RemotePodMock()
e.status.container_statuses = []
for r in not_running or []:
e.status.container_statuses.append(container(r, False))
for r in running or []:
e.status.container_statuses.append(container(r, True))
return e
def container(name, running):
c = ContainerStatusMock(name)
c.state = RemotePodMock()
c.state.running = {"a": "b"} if running else None
return c
pod_mock_list = []
pod_mock_list.append(pytest.param(None, False, id="None remote_pod"))
p = RemotePodMock()
p.status = None
pod_mock_list.append(pytest.param(p, False, id="None remote_pod.status"))
p = RemotePodMock()
p.status = RemotePodMock()
p.status.container_statuses = []
pod_mock_list.append(pytest.param(p, False, id="empty remote_pod.status.container_statuses"))
pod_mock_list.append(pytest.param(remote_pod(), False, id="filter empty"))
pod_mock_list.append(pytest.param(remote_pod(None, ["base"]), False, id="filter 0 running"))
pod_mock_list.append(pytest.param(remote_pod(["hello"], ["base"]), False, id="filter 1 not running"))
pod_mock_list.append(pytest.param(remote_pod(["base"], ["hello"]), True, id="filter 1 running"))
return pod_mock_list |
The `container_is_running` function is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This test
verifies the expected behavior. | def test_container_is_running(remote_pod, result):
"""The `container_is_running` function is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This test
verifies the expected behavior."""
assert container_is_running(remote_pod, "base") is result |
The `container_is_succeeded` method is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This function
emits params used in `test_container_is_succeeded` to verify this behavior.
We create mock classes not derived from MagicMock because with an instance `e` of MagicMock,
tests like `e.hello is not None` are always True. | def params_for_test_container_is_succeeded():
"""The `container_is_succeeded` method is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This function
emits params used in `test_container_is_succeeded` to verify this behavior.
We create mock classes not derived from MagicMock because with an instance `e` of MagicMock,
tests like `e.hello is not None` are always True.
"""
class RemotePodMock:
pass
class ContainerStatusMock:
def __init__(self, name):
self.name = name
def remote_pod(succeeded=None, not_succeeded=None):
e = RemotePodMock()
e.status = RemotePodMock()
e.status.container_statuses = []
for r in not_succeeded or []:
e.status.container_statuses.append(container(r, False))
for r in succeeded or []:
e.status.container_statuses.append(container(r, True))
return e
def container(name, succeeded):
c = ContainerStatusMock(name)
c.state = RemotePodMock()
c.state.terminated = SimpleNamespace(**{"exit_code": 0}) if succeeded else None
return c
pod_mock_list = []
pod_mock_list.append(pytest.param(None, False, id="None remote_pod"))
p = RemotePodMock()
p.status = None
pod_mock_list.append(pytest.param(p, False, id="None remote_pod.status"))
p = RemotePodMock()
p.status = RemotePodMock()
p.status.container_statuses = []
pod_mock_list.append(pytest.param(p, False, id="empty remote_pod.status.container_statuses"))
pod_mock_list.append(pytest.param(remote_pod(), False, id="filter empty"))
pod_mock_list.append(pytest.param(remote_pod(None, ["base"]), False, id="filter 0 succeeded"))
pod_mock_list.append(pytest.param(remote_pod(["hello"], ["base"]), False, id="filter 1 not succeeded"))
pod_mock_list.append(pytest.param(remote_pod(["base"], ["hello"]), True, id="filter 1 succeeded"))
return pod_mock_list |
The `container_is_succeeded` function is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This test
verifies the expected behavior. | def test_container_is_succeeded(remote_pod, result):
"""The `container_is_succeeded` function is designed to handle an assortment of bad objects
returned from `read_pod`. E.g. a None object, an object `e` such that `e.status` is None,
an object `e` such that `e.status.container_statuses` is None, and so on. This test
verifies the expected behavior."""
assert container_is_succeeded(remote_pod, "base") is result |
Test Cohere client is getting called with the correct key and that
the execute method returns the expected response. | def test_cohere_embedding_operator(cohere_client, get_connection):
"""
Test Cohere client is getting called with the correct key and that
the execute method returns the expected response.
"""
embedded_obj = [1, 2, 3]
class resp:
embeddings = embedded_obj
api_key = "test"
api_url = "http://some_host.com"
timeout = 150
max_retries = 5
texts = ["On Kernel-Target Alignment. We describe a family of global optimization procedures"]
get_connection.return_value = Connection(conn_type="cohere", password=api_key, host=api_url)
client_obj = MagicMock()
cohere_client.return_value = client_obj
client_obj.embed.return_value = resp
op = CohereEmbeddingOperator(
task_id="embed", conn_id="some_conn", input_text=texts, timeout=timeout, max_retries=max_retries
)
val = op.execute(context={})
cohere_client.assert_called_once_with(
api_key=api_key, api_url=api_url, timeout=timeout, max_retries=max_retries
)
assert val == embedded_obj |
Reset XCom entries. | def reset_db():
"""Reset XCom entries."""
db.clear_db_runs()
db.clear_db_xcom()
yield
db.clear_db_runs()
db.clear_db_xcom() |
Test the execute function in case where SQL query was successful. | def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
"""
Test the execute function in case where SQL query was successful.
"""
class SQLExecuteQueryOperatorForTest(SQLExecuteQueryOperator):
_mock_db_api_hook = MagicMock()
def get_db_hook(self):
return self._mock_db_api_hook
op = SQLExecuteQueryOperatorForTest(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
op._mock_db_api_hook.run.return_value = hook_results
op._mock_db_api_hook.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
op._mock_db_api_hook.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
) |
Test the execute function in case where SQL query was successful. | def test_exec_success_with_process_output(
sql, return_last, split_statement, hook_results, hook_descriptions, expected_results
):
"""
Test the execute function in case where SQL query was successful.
"""
class SQLExecuteQueryOperatorForTestWithProcessOutput(SQLExecuteQueryOperator):
_mock_db_api_hook = MagicMock()
def get_db_hook(self):
return self._mock_db_api_hook
def _process_output(
self, results: list[Any], descriptions: list[Sequence[Sequence] | None]
) -> list[Any]:
return list(zip(descriptions, results))
op = SQLExecuteQueryOperatorForTestWithProcessOutput(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
op._mock_db_api_hook.run.return_value = hook_results
op._mock_db_api_hook.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
op._mock_db_api_hook.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
) |
Utility function to generate the create endpoint given the host. | def create_endpoint(host):
"""
Utility function to generate the create endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/create" |
Utility function to generate the reset endpoint given the host. | def reset_endpoint(host):
"""
Utility function to generate the reset endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/reset" |
Utility function to generate the run now endpoint given the host. | def run_now_endpoint(host):
"""
Utility function to generate the run now endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/run-now" |
Utility function to generate the submit run endpoint given the host. | def submit_run_endpoint(host):
"""
Utility function to generate the submit run endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/submit" |
Utility function to generate the get run endpoint given the host. | def get_run_endpoint(host):
"""
Utility function to generate the get run endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/get" |
Utility function to generate the get run output endpoint given the host. | def get_run_output_endpoint(host):
"""
Utility function to generate the get run output endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/get-output" |
Utility function to generate the cancel run endpoint given the host. | def cancel_run_endpoint(host):
"""
Utility function to generate the cancel run endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/cancel" |
Utility function to generate the cancel all runs endpoint given the host. | def cancel_all_runs_endpoint(host):
"""
Utility function to generate the cancel all runs endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/cancel-all" |
Utility function to generate delete run endpoint given the host. | def delete_run_endpoint(host):
"""
Utility function to generate delete run endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/delete" |
Utility function to generate the repair run endpoint given the host. | def repair_run_endpoint(host):
"""
Utility function to generate the repair run endpoint given the host.
"""
return f"https://{host}/api/2.1/jobs/runs/repair" |
Utility function to generate the get cluster endpoint given the host. | def get_cluster_endpoint(host):
"""
Utility function to generate the get cluster endpoint given the host.
"""
return f"https://{host}/api/2.0/clusters/get" |
Utility function to generate the start cluster endpoint given the host. | def start_cluster_endpoint(host):
"""
Utility function to generate the start cluster endpoint given the host.
"""
return f"https://{host}/api/2.0/clusters/start" |
Utility function to generate the restart cluster endpoint given the host. | def restart_cluster_endpoint(host):
"""
Utility function to generate the restart cluster endpoint given the host.
"""
return f"https://{host}/api/2.0/clusters/restart" |
Utility function to generate the terminate cluster endpoint given the host. | def terminate_cluster_endpoint(host):
"""
Utility function to generate the terminate cluster endpoint given the host.
"""
return f"https://{host}/api/2.0/clusters/delete" |
Utility function to generate the install endpoint given the host. | def install_endpoint(host):
"""
Utility function to generate the install endpoint given the host.
"""
return f"https://{host}/api/2.0/libraries/install" |
Utility function to generate the uninstall endpoint given the host. | def uninstall_endpoint(host):
"""
Utility function to generate the uninstall endpoint given the host.
"""
return f"https://{host}/api/2.0/libraries/uninstall" |
Utility function to generate the list jobs endpoint given the host | def list_jobs_endpoint(host):
"""
Utility function to generate the list jobs endpoint given the host
"""
return f"https://{host}/api/2.1/jobs/list" |
Utility function to generate the list pipelines endpoint given the host. | def list_pipelines_endpoint(host):
"""
Utility function to generate the list pipelines endpoint given the host.
"""
return f"https://{host}/api/2.0/pipelines" |
Utility function to generate the list spark versions endpoint given the host | def list_spark_versions_endpoint(host):
"""Utility function to generate the list spark versions endpoint given the host"""
return f"https://{host}/api/2.0/clusters/spark-versions" |
Ensure that column names can be used as namedtuple attributes.
namedtuples do not accept special characters or reserved Python keywords
as column names. This test ensures that such columns are renamed. | def test_incorrect_column_names(row_objects, fields_names):
"""Ensure that column names can be used as namedtuple attribute.
namedtuple do not accept special characters and reserved python keywords
as column name. This test ensure that such columns are renamed.
"""
result = DatabricksSqlHook()._make_common_data_structure(row_objects)
assert result._fields == fields_names |
Test the execute function in case where SQL query was successful. | def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
"""
Test the execute function in case where SQL query was successful.
"""
with patch("airflow.providers.databricks.operators.databricks_sql.DatabricksSqlHook") as db_mock_class:
op = DatabricksSqlOperator(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
db_mock = db_mock_class.return_value
db_mock.run.return_value = hook_results
db_mock.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
http_path=None,
return_tuple=True,
session_configuration=None,
sql_endpoint_name=None,
http_headers=None,
catalog=None,
schema=None,
caller="DatabricksSqlOperator",
)
db_mock.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
) |
Test the execute function in case where SQL query was successful
and data is written as CSV, JSON. | def test_exec_write_file(
return_last, split_statements, sql, descriptions, hook_results, do_xcom_push, output_format, tmp_path
):
"""
Test the execute function in case where SQL query was successful
and data is written as CSV, JSON.
"""
with patch("airflow.providers.databricks.operators.databricks_sql.DatabricksSqlHook") as db_mock_class:
path = tmp_path / "testfile"
op = DatabricksSqlOperator(
task_id=TASK_ID,
sql=sql,
output_path=os.fspath(path),
output_format=output_format,
return_last=return_last,
do_xcom_push=do_xcom_push,
split_statements=split_statements,
)
db_mock = db_mock_class.return_value
mock_results = hook_results
db_mock.run.return_value = mock_results
db_mock.descriptions = descriptions
op.execute(None)
if output_format == "csv":
results = path.read_text().splitlines()
        # In all cases, only the result of the last query is written to the file
assert results == ["id,value", "1,value1", "2,value2"]
elif output_format == "json":
results = json.loads(path.read_text())
assert results == [
{"id": 1, "value": "value1"},
{"id": 2, "value": "value2"},
]
elif output_format == "jsonl":
results = path.read_text().splitlines()
assert results == [
'{"id": 1, "value": "value1"}',
'{"id": 2, "value": "value2"}',
]
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
http_path=None,
return_tuple=True,
session_configuration=None,
sql_endpoint_name=None,
http_headers=None,
catalog=None,
schema=None,
caller="DatabricksSqlOperator",
)
db_mock.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statements,
) |
Helper context for mocking multiple references of the same object
:param o: Object/Class for mocking.
:param additional_modules: additional modules where ``o`` exists. | def _mocker_context(o, additional_modules: list | None = None) -> AbstractContextManager[mock.MagicMock]:
"""
Helper context for mocking multiple references of the same object
:param o: Object/Class for mocking.
:param additional_modules: additional modules where ``o`` exists.
"""
patched = []
object_name = o.__name__
mocked_object = mock.MagicMock(name=f"Mocked.{object_name}", spec=o)
additional_modules = additional_modules or []
try:
for mdl in [o.__module__, *additional_modules]:
mocker = mock.patch(f"{mdl}.{object_name}", mocked_object)
mocker.start()
patched.append(mocker)
yield mocked_object
finally:
for mocker in reversed(patched):
mocker.stop() |
Patch ``docker.APIClient`` with a mock value. | def docker_api_client_patcher():
"""Patch ``docker.APIClient`` with a mock value."""
from airflow.providers.docker.hooks.docker import APIClient
with _mocker_context(APIClient, ["airflow.providers.docker.hooks.docker"]) as m:
yield m |
Patch DockerHook with a mock value. | def docker_hook_patcher():
"""Patch DockerHook with a mock value."""
from airflow.providers.docker.operators.docker import DockerHook
with _mocker_context(DockerHook, ["airflow.providers.docker.operators.docker"]) as m:
yield m |
Valid attributes for DockerHook. | def hook_kwargs():
"""Valid attributes for DockerHook."""
return {
"base_url": TEST_BASE_URL,
"docker_conn_id": "docker_default",
"tls": False,
"version": TEST_VERSION,
"timeout": 42,
} |
Hook shouldn't create a client or retrieve the Airflow connection during initialisation. | def test_no_connection_during_initialisation(hook_conn, docker_api_client_patcher, hook_kwargs):
"""Hook shouldn't create a client or retrieve the Airflow connection during initialisation."""
DockerHook(**hook_kwargs)
hook_conn.assert_not_called()
docker_api_client_patcher.assert_not_called() |
Test mandatory `base_url` Hook argument. | def test_init_fails_when_no_base_url_given(hook_kwargs):
"""Test mandatory `base_url` Hook argument."""
hook_kwargs.pop("base_url")
with pytest.raises(AirflowException, match=r"URL to the Docker server not provided\."):
DockerHook(**hook_kwargs) |
Test that a warning is raised if the user specified tls but used a non-https scheme. | def test_init_warn_on_non_https_host_with_enabled_tls(base_url, tls_config, hook_kwargs, caplog):
"""Test that a warning is raised if the user specified tls but used a non-https scheme."""
caplog.set_level(logging.WARNING, logger=HOOK_LOGGER_NAME)
hook_kwargs["base_url"] = base_url
hook_kwargs["tls"] = tls_config
DockerHook(**hook_kwargs)
assert "When `tls` specified then `base_url` expected 'https://' schema." in caplog.messages |
Test that Hook init does not fail when optional arguments are not provided. | def test_optional_hook_attributes(hook_attr, hook_kwargs):
"""Test that Hook init does not fail when optional arguments are not provided."""
hook_kwargs.pop(hook_attr)
DockerHook(**hook_kwargs) |
Test creation of ``docker.APIClient`` from hook arguments.
Additionally check:
- that tls:// is changed to https://
- that the ``api_client`` property and the ``get_conn`` method are cached
- that the hook doesn't try to access Airflow Connections when `docker_conn_id` is not provided. | def test_create_api_client(conn_id, hook_conn, docker_api_client_patcher, caplog):
"""
Test creation of ``docker.APIClient`` from hook arguments.
Additionally check:
- that tls:// is changed to https://
- that the ``api_client`` property and the ``get_conn`` method are cached
- that the hook doesn't try to access Airflow Connections when `docker_conn_id` is not provided.
"""
caplog.set_level(logging.DEBUG, logger=HOOK_LOGGER_NAME)
hook = DockerHook(
docker_conn_id=conn_id, base_url=TEST_TLS_BASE_URL, version=TEST_VERSION, tls=True, timeout=42
)
assert "Change `base_url` schema from 'tcp://' to 'https://'." in caplog.messages
caplog.clear()
assert hook.client_created is False
api_client = hook.api_client
assert api_client is hook.get_conn(), "Docker API Client not cacheable"
docker_api_client_patcher.assert_called_once_with(
base_url=TEST_HTTPS_BASE_URL, version=TEST_VERSION, tls=True, timeout=42
)
assert hook.client_created is True |
Test failures during creation of ``docker.APIClient`` from hook arguments. | def test_failed_create_api_client(docker_api_client_patcher):
"""Test failures during creation of ``docker.APIClient`` from hook arguments."""
hook = DockerHook(base_url=TEST_BASE_URL)
docker_api_client_patcher.side_effect = Exception("Fake Exception")
with pytest.raises(Exception, match="Fake Exception"):
hook.get_conn()
assert hook.client_created is False |
Test successful login to Docker Registry with the provided connection. | def test_success_login_to_registry(hook_conn, docker_api_client_patcher, expected: dict):
"""Test successful login to Docker Registry with the provided connection."""
mock_login = mock.MagicMock()
docker_api_client_patcher.return_value.login = mock_login
hook = DockerHook(docker_conn_id=TEST_CONN_ID, base_url=TEST_BASE_URL)
hook.get_conn()
mock_login.assert_called_once_with(**expected) |
Test error during Docker Registry login. | def test_failed_login_to_registry(hook_conn, docker_api_client_patcher, caplog):
"""Test error during Docker Registry login."""
caplog.set_level(logging.ERROR, logger=HOOK_LOGGER_NAME)
docker_api_client_patcher.return_value.login.side_effect = APIError("Fake Error")
hook = DockerHook(docker_conn_id=TEST_CONN_ID, base_url=TEST_BASE_URL)
with pytest.raises(APIError, match="Fake Error"):
hook.get_conn()
assert "Login failed" in caplog.messages |
Test invalid/missing connection parameters. | def test_invalid_conn_parameters(hook_conn, docker_api_client_patcher, ex, error_message):
"""Test invalid/missing connection parameters."""
hook = DockerHook(docker_conn_id=TEST_CONN_ID, base_url=TEST_BASE_URL)
with pytest.raises(ex, match=error_message):
hook.get_conn() |
Test that False is returned when cert/key arguments are missing. | def test_construct_tls_config_missing_certs_args(tls_params: dict):
"""Test that False is returned when cert/key arguments are missing."""
assert DockerHook.construct_tls_config(**tls_params) is False |
Test construct ``docker.tls.TLSConfig`` object. | def test_construct_tls_config(assert_hostname, ssl_version):
"""Test construct ``docker.tls.TLSConfig`` object."""
tls_params = {"ca_cert": "test-ca", "client_cert": "foo-bar", "client_key": "spam-egg"}
expected_call_args = {"ca_cert": "test-ca", "client_cert": ("foo-bar", "spam-egg"), "verify": True}
if assert_hostname is not None:
tls_params["assert_hostname"] = assert_hostname
if ssl_version is not None:
tls_params["ssl_version"] = ssl_version
if DOCKER_PY_7_PLUS and (assert_hostname is not None or ssl_version is not None):
ctx = pytest.warns(UserWarning, match=r"removed in `docker\.TLSConfig` constructor arguments")
no_warns = False
else:
ctx = warnings.catch_warnings()
no_warns = True
    # Please note that spec should be set; otherwise we could miss arguments being removed from the constructor.
with mock.patch.object(TLSConfig, "__init__", return_value=None, spec=TLSConfig) as mock_tls_config:
with ctx:
if no_warns:
warnings.simplefilter("error")
DockerHook.construct_tls_config(**tls_params)
if DOCKER_PY_7_PLUS:
mock_tls_config.assert_called_once_with(**expected_call_args)
else:
mock_tls_config.assert_called_once_with(
**expected_call_args, assert_hostname=assert_hostname, ssl_version=ssl_version
) |
Test connect to valid host from a given list of hosts. | def test_connect_to_valid_host(base_url):
"""Test connect to valid host from a given list of hosts."""
hook = DockerHook(base_url=base_url, docker_conn_id=None)
assert hook.api_client.base_url == "http+docker://localhost" |
Test that the operator uses DockerHook. | def test_hook_usage(docker_hook_patcher, docker_conn_id, tls_params: dict):
"""Test that the operator uses DockerHook."""
docker_hook_patcher.construct_tls_config.return_value = "MOCK-TLS-VALUE"
expected_tls_call_args = {
"ca_cert": tls_params.get("tls_ca_cert"),
"client_cert": tls_params.get("tls_client_cert"),
"client_key": tls_params.get("tls_client_key"),
"verify": tls_params.get("tls_verify", True),
"assert_hostname": tls_params.get("tls_hostname"),
"ssl_version": tls_params.get("tls_ssl_version"),
}
op = DockerOperator(
task_id="test_hook_usage_without_tls",
api_version=TEST_API_VERSION,
docker_conn_id=docker_conn_id,
image=TEST_IMAGE,
docker_url=TEST_DOCKER_URL,
timeout=42,
**tls_params,
)
hook = op.hook
assert hook is op.get_hook()
docker_hook_patcher.assert_called_once_with(
docker_conn_id=docker_conn_id,
base_url=TEST_DOCKER_URL,
version=TEST_API_VERSION,
tls="MOCK-TLS-VALUE",
timeout=42,
)
docker_hook_patcher.construct_tls_config.assert_called_once_with(**expected_tls_call_args)
# Check that ``DockerOperator.cli`` property return the same object as ``hook.api_client``.
assert op.cli is hook.api_client |
Test operator on_kill method if APIClient created. | def test_on_kill_client_created(docker_api_client_patcher, container_exists):
"""Test operator on_kill method if APIClient created."""
op = DockerOperator(image=TEST_IMAGE, hostname=TEST_DOCKER_URL, task_id="test_on_kill")
op.container = {"Id": "some_id"} if container_exists else None
op.hook.get_conn() # Try to create APIClient
op.on_kill()
if container_exists:
docker_api_client_patcher.return_value.stop.assert_called_once_with("some_id")
else:
docker_api_client_patcher.return_value.stop.assert_not_called() |
Test operator on_kill method if APIClient not created in case of error. | def test_on_kill_client_not_created(docker_api_client_patcher):
"""Test operator on_kill method if APIClient not created in case of error."""
docker_api_client_patcher.side_effect = APIError("Fake Client Error")
mock_container = mock.MagicMock()
op = DockerOperator(image=TEST_IMAGE, hostname=TEST_DOCKER_URL, task_id="test_on_kill")
op.container = mock_container
with pytest.raises(APIError, match="Fake Client Error"):
op.hook.get_conn()
op.on_kill()
docker_api_client_patcher.return_value.stop.assert_not_called()
mock_container.assert_not_called() |
Tests that the ElasticsearchTaskHandler retrieves the correct configuration keys from the config file.
* old_parameters are removed
* parameters from config are automatically added
* constructor parameters missing from config are also added
:return: | def test_retrieve_config_keys():
"""
Tests that the ElasticsearchTaskHandler retrieves the correct configuration keys from the config file.
* old_parameters are removed
* parameters from config are automatically added
* constructor parameters missing from config are also added
:return:
"""
with conf_vars(
{
("elasticsearch_configs", "http_compress"): "False",
("elasticsearch_configs", "timeout"): "10",
}
):
args_from_config = get_es_kwargs_from_config().keys()
# verify_certs comes from default config value
assert "verify_certs" in args_from_config
# timeout comes from config provided value
assert "timeout" in args_from_config
# http_compress comes from config value
assert "http_compress" in args_from_config
assert "self" not in args_from_config |
Test that retry_timeout is converted to retry_on_timeout. | def test_retrieve_retry_on_timeout():
"""
Test that retry_timeout is converted to retry_on_timeout.
"""
with conf_vars(
{
("elasticsearch_configs", "retry_timeout"): "True",
}
):
args_from_config = get_es_kwargs_from_config().keys()
# verify_certs comes from default config value
assert "retry_on_timeout" in args_from_config |
Test if self is not a valid argument. | def test_self_not_valid_arg():
"""
Test if self is not a valid argument.
"""
assert "self" not in VALID_ES_CONFIG_KEYS |
Helper function to transform hosts argument to
:class:`~elasticsearch.Elasticsearch` to a list of dicts. | def _normalize_hosts(hosts):
"""
Helper function to transform hosts argument to
:class:`~elasticsearch.Elasticsearch` to a list of dicts.
"""
# if hosts are empty, just defer to defaults down the line
if hosts is None:
return [{}]
    # passed in just one string
    if isinstance(hosts, str):
        hosts = [hosts]
    out = []
    # normalize hosts to dicts
    for host in hosts:
        if isinstance(host, str):
            if "://" not in host:
                host = f"//{host}"
            parsed_url = urlparse(host)
            h = {"host": parsed_url.hostname}
            if parsed_url.port:
                h["port"] = parsed_url.port
            if parsed_url.scheme == "https":
                h["port"] = parsed_url.port or 443
                h["use_ssl"] = True
            if parsed_url.username or parsed_url.password:
                h["http_auth"] = f"{unquote(parsed_url.username)}:{unquote(parsed_url.password)}"
            if parsed_url.path and parsed_url.path != "/":
                h["url_prefix"] = parsed_url.path
            out.append(h)
        else:
            out.append(host)
return out |
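A quick illustration of the normalization above (the host URL is an illustrative example):

assert _normalize_hosts("https://user:secret@example.com:9200/prefix") == [
    {
        "host": "example.com",
        "port": 9200,
        "use_ssl": True,
        "http_auth": "user:secret",
        "url_prefix": "/prefix",
    }
]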
Elasticmock decorator | def elasticmock(function):
"""Elasticmock decorator"""
@wraps(function)
def decorated(*args, **kwargs):
ELASTIC_INSTANCES.clear()
with patch("elasticsearch.Elasticsearch", _get_elasticmock):
result = function(*args, **kwargs)
return result
return decorated |
Returns a random id for elasticsearch | def get_random_id(size=DEFAULT_ELASTICSEARCH_ID_SIZE):
"""Returns a random id for elasticsearch"""
return "".join(random.choices(CHARSET_FOR_ELASTICSEARCH_ID, k=size)) |
Decorator that pops all accepted parameters from method's kwargs and puts
them in the params argument. | def query_params(*es_query_params, **kwargs):
"""
Decorator that pops all accepted parameters from method's kwargs and puts
them in the params argument.
"""
body_params = kwargs.pop("body_params", None)
body_only_params = set(body_params or ()) - set(es_query_params)
body_name = kwargs.pop("body_name", None)
body_required = kwargs.pop("body_required", False)
type_possible_in_params = "type" in es_query_params
assert not (body_name and body_params)
assert not (body_name and body_required)
assert not body_required or body_params
def _wrapper(func):
@wraps(func)
def _wrapped(*args, **kwargs):
params = (kwargs.pop("params", None) or {}).copy()
headers = {k.lower(): v for k, v in (kwargs.pop("headers", None) or {}).copy().items()}
if "opaque_id" in kwargs:
headers["x-opaque-id"] = kwargs.pop("opaque_id")
http_auth = kwargs.pop("http_auth", None)
api_key = kwargs.pop("api_key", None)
using_body_kwarg = kwargs.get("body", None) is not None
using_positional_args = args and len(args) > 1
if type_possible_in_params:
doc_type_in_params = params and "doc_type" in params
doc_type_in_kwargs = "doc_type" in kwargs
if doc_type_in_params:
params["type"] = params.pop("doc_type")
if doc_type_in_kwargs:
kwargs["type"] = kwargs.pop("doc_type")
if using_body_kwarg or using_positional_args:
body_only_params_in_use = body_only_params.intersection(kwargs)
if body_only_params_in_use:
params_prose = "', '".join(sorted(body_only_params_in_use))
plural_params = len(body_only_params_in_use) > 1
raise TypeError(
f"The '{params_prose}' parameter{'s' if plural_params else ''} "
f"{'are' if plural_params else 'is'} only serialized in the "
f"request body and can't be combined with the 'body' parameter. "
f"Either stop using the 'body' parameter and use keyword-arguments "
f"only or move the specified parameters into the 'body'. "
f"See https://github.com/elastic/elasticsearch-py/issues/1698 "
f"for more information"
)
elif set(body_params or ()).intersection(kwargs):
body = {}
for param in body_params:
value = kwargs.pop(param, None)
if value is not None:
body[param.rstrip("_")] = value
kwargs["body"] = body
elif body_required:
kwargs["body"] = {}
if body_name:
if body_name in kwargs:
if using_body_kwarg:
raise TypeError(
f"Can't use '{body_name}' and 'body' parameters together"
f" because '{body_name}' is an alias for 'body'. "
f"Instead you should only use the '{body_name}' "
f"parameter. See https://github.com/elastic/elasticsearch-py/issues/1698 "
f"for more information"
)
kwargs["body"] = kwargs.pop(body_name)
if http_auth is not None and api_key is not None:
raise ValueError("Only one of 'http_auth' and 'api_key' may be passed at a time")
elif http_auth is not None:
headers["authorization"] = f"Basic {_base64_auth_header(http_auth)}"
elif api_key is not None:
headers["authorization"] = f"ApiKey {_base64_auth_header(api_key)}"
for p in es_query_params + GLOBAL_PARAMS:
if p in kwargs:
v = kwargs.pop(p)
if v is not None:
params[p] = _escape(v)
for p in ("ignore", "request_timeout"):
if p in kwargs:
params[p] = kwargs.pop(p)
return func(*args, params=params, headers=headers, **kwargs)
return _wrapped
return _wrapper |
Takes either a 2-tuple or a base64-encoded string
and returns a base64-encoded string to be used
as an HTTP authorization header. | def _base64_auth_header(auth_value):
"""Takes either a 2-tuple or a base64-encoded string
and returns a base64-encoded string to be used
as an HTTP authorization header.
"""
if isinstance(auth_value, (list, tuple)):
auth_value = base64.b64encode(to_bytes(":".join(auth_value)))
return to_str(auth_value) |
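For example, assuming ``to_bytes`` and ``to_str`` are the usual str/bytes conversion helpers from the same module:

assert _base64_auth_header(("user", "pass")) == "dXNlcjpwYXNz"  # base64 of "user:pass"
assert _base64_auth_header("dXNlcjpwYXNz") == "dXNlcjpwYXNz"  # already-encoded credentials pass through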
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first. | def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
# make sequences into comma-separated strings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if isinstance(value, str):
return value.encode("utf-8")
return str(value) |
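A few examples of the escaping rules above:

assert _escape(["a", "b"]) == b"a,b"  # sequences become comma-separated strings, then utf-8 encoded
assert _escape(True) == b"true"  # booleans become "true"/"false"
assert _escape(b"raw") == b"raw"  # bytestrings are returned untouched
assert _escape(10) == "10"  # anything else falls back to str()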
Test the execute function in case where SQL query was successful. | def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
"""
Test the execute function in case where SQL query was successful.
"""
with patch("airflow.providers.common.sql.operators.sql.BaseSQLOperator.get_db_hook") as get_db_hook_mock:
op = ExasolOperator(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
dbapi_hook = MagicMock()
get_db_hook_mock.return_value = dbapi_hook
dbapi_hook.run.return_value = hook_results
dbapi_hook.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
dbapi_hook.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=exasol_fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
) |
Test that usernames are case insensitive | def test_users_can_be_found(app, security_manager, session, caplog):
"""Test that usernames are case insensitive"""
create_user(app, "Test")
create_user(app, "test")
create_user(app, "TEST")
create_user(app, "TeSt")
assert security_manager.find_user("Test")
users = security_manager.get_all_users()
assert len(users) == 1
delete_user(app, "Test")
assert "Error adding new user to database" in caplog.text |
Asserts that a task is deferred and a MLEngineStartTrainingJobTrigger will be fired
when the MLEngineStartTrainingJobOperator is executed in deferrable mode when deferrable=True. | def test_async_create_training_job_should_execute_successfully(mock_hook):
"""
Asserts that a task is deferred and a MLEngineStartTrainingJobTrigger will be fired
when the MLEngineStartTrainingJobOperator is executed in deferrable mode when deferrable=True.
"""
mock_hook.return_value.create_job_without_waiting_result.return_value = "test_training"
op = MLEngineStartTrainingJobOperator(
task_id=TEST_TASK_ID,
project_id=TEST_GCP_PROJECT_ID,
region=TEST_REGION,
job_id=TEST_JOB_ID,
runtime_version=TEST_RUNTIME_VERSION,
python_version=TEST_PYTHON_VERSION,
job_dir=TEST_JOB_DIR,
package_uris=TEST_PACKAGE_URIS,
training_python_module=TEST_TRAINING_PYTHON_MODULE,
training_args=TEST_TRAINING_ARGS,
labels=TEST_LABELS,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(create_context(op))
assert isinstance(
exc.value.trigger, MLEngineStartTrainingJobTrigger
), "Trigger is not a MLEngineStartTrainingJobTrigger" |
Tests that an AirflowException is raised in case of error event | def test_async_create_training_job_should_throw_exception():
"""Tests that an AirflowException is raised in case of error event"""
op = MLEngineStartTrainingJobOperator(
task_id=TEST_TASK_ID,
project_id=TEST_GCP_PROJECT_ID,
region=TEST_REGION,
job_id=TEST_JOB_ID,
runtime_version=TEST_RUNTIME_VERSION,
python_version=TEST_PYTHON_VERSION,
job_dir=TEST_JOB_DIR,
package_uris=TEST_PACKAGE_URIS,
training_python_module=TEST_TRAINING_PYTHON_MODULE,
training_args=TEST_TRAINING_ARGS,
labels=TEST_LABELS,
deferrable=True,
)
with pytest.raises(AirflowException):
op.execute_complete(context=None, event={"status": "error", "message": "test failure message"}) |
Asserts that logging occurs as expected | def test_async_create_training_job_logging_should_execute_successfully():
"""Asserts that logging occurs as expected"""
op = MLEngineStartTrainingJobOperator(
task_id=TEST_TASK_ID,
project_id=TEST_GCP_PROJECT_ID,
region=TEST_REGION,
job_id=TEST_JOB_ID,
runtime_version=TEST_RUNTIME_VERSION,
python_version=TEST_PYTHON_VERSION,
job_dir=TEST_JOB_DIR,
package_uris=TEST_PACKAGE_URIS,
training_python_module=TEST_TRAINING_PYTHON_MODULE,
training_args=TEST_TRAINING_ARGS,
labels=TEST_LABELS,
deferrable=True,
)
with mock.patch.object(op.log, "info") as mock_log_info:
op.execute_complete(
context=create_context(op),
event={"status": "success", "message": "Job completed", "job_id": TEST_TASK_ID},
)
mock_log_info.assert_called_with("%s completed with response %s ", TEST_TASK_ID, "Job completed") |
Creates an empty context. | def context():
"""
Creates an empty context.
"""
context = {}
return context |
Each time this is called, mock a time 10 seconds later
than the previous call. | def next_time_side_effect():
"""
Each time this is called, mock a time 10 seconds later
than the previous call.
"""
return MOCK_DATE_ARRAY.pop() |
Context manager that provides a temporary value for the FACEBOOK_CONNECTION_ID connection.
It builds a new connection that includes the path to the provided service json,
required scopes and project id.
:param key_file_path: Path to file with FACEBOOK credentials .json file. | def provide_facebook_connection(key_file_path: str):
"""
Context manager that provides a temporary value for the FACEBOOK_CONNECTION_ID connection.
It builds a new connection that includes the path to the provided service json,
required scopes and project id.
:param key_file_path: Path to file with FACEBOOK credentials .json file.
"""
if not key_file_path.endswith(".json"):
raise AirflowException("Use a JSON key file.")
with open(key_file_path) as credentials:
creds = json.load(credentials)
missing_keys = CONFIG_REQUIRED_FIELDS - creds.keys()
if missing_keys:
message = f"{missing_keys} fields are missing"
raise AirflowException(message)
conn = Connection(conn_id=FACEBOOK_CONNECTION_ID, conn_type=CONNECTION_TYPE, extra=json.dumps(creds))
with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
yield |
Returns object that mocks asynchronous looping over mock jobs | def mock_jobs(names: list[str], latest_operation_names: list[str | None]):
"""Returns object that mocks asynchronous looping over mock jobs"""
jobs = [mock.MagicMock(latest_operation_name=name) for name in latest_operation_names]
for job, name in zip(jobs, names):
job.name = name
mock_obj = mock.MagicMock()
mock_obj.__aiter__.return_value = iter(jobs)
return mock_obj |
Helper fixture for asserting that no log messages from the specified logger were captured.
This is a workaround for an issue with asyncio in ``test_disable_logging``, see:
- https://github.com/apache/airflow/pull/26871
- https://github.com/apache/airflow/pull/26973
- https://github.com/apache/airflow/pull/36453 | def assert_no_logs(caplog):
"""
Helper fixture for asserting that no log messages from the specified logger were captured.
This is a workaround for an issue with asyncio in ``test_disable_logging``, see:
- https://github.com/apache/airflow/pull/26871
- https://github.com/apache/airflow/pull/26973
- https://github.com/apache/airflow/pull/36453
"""
@contextmanager
def wrapper(level: str, logger: str):
with caplog.at_level(level=level, logger=logger):
caplog.clear()
yield
if records := list(filter(lambda lr: lr[0] == logger, caplog.record_tuples)):
msg = f"Did not expect any log message from logger={logger!r} but got:"
for log_record in records:
msg += f"\n * logger name: {log_record[0]!r}, level: {log_record[1]}, msg: {log_record[2]!r}"
raise AssertionError(msg)
return wrapper |
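A hedged usage sketch of this fixture; the logger name is a placeholder and the body stands in for the code under test.
# Hypothetical test: raises AssertionError if the named logger emitted any record.
def test_stays_silent_example(assert_no_logs):
    with assert_no_logs(level="DEBUG", logger="airflow.providers.some.module"):
        ...  # placeholder for code that is expected to log nothing on this logger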
Creates mock async API response. | def aioresponse():
"""
Creates mock async API response.
"""
with aioresponses() as async_response:
yield async_response |
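A hedged sketch of registering a canned response on the mocked aiohttp transport; the URL and payload are placeholders.
# Hypothetical usage: an aiohttp GET against this URL now receives the canned payload.
def test_async_http_example(aioresponse):
    aioresponse.get("http://example.com/api/resource", payload={"status": "ok"}, status=200)
    ...  # code under test issuing the aiohttp request goes here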
Helper fixture for creating a test connection. | def create_mock_connection(monkeypatch):
"""Helper fixture for creating a test connection."""
def wrapper(conn: T, conn_id: str | None = None):
conn_id = conn_id or "test_conn_" + "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
if isinstance(conn, dict):
conn = Connection.from_json(conn)
elif isinstance(conn, str):
conn = Connection(uri=conn)
if not isinstance(conn, Connection):
raise TypeError(
f"Fixture expected either JSON, URI or Connection type, but got {type(conn).__name__}"
)
if not conn.conn_id:
conn.conn_id = conn_id
monkeypatch.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.get_uri())
return conn
return wrapper |
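A hedged usage sketch; the URI and connection id are placeholders, and ``os`` is assumed to be imported.
# Hypothetical usage: the fixture accepts a URI string, a JSON dict, or a Connection object.
def test_uses_mock_connection_example(create_mock_connection):
    conn = create_mock_connection("mysql://user:pass@host:3306/schema", conn_id="my_test_conn")
    assert os.environ["AIRFLOW_CONN_MY_TEST_CONN"] == conn.get_uri()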
Helper fixture for creating multiple test connections. | def create_mock_connections(create_mock_connection):
"""Helper fixture for creating multiple test connections."""
def wrapper(*conns: T):
return list(map(create_mock_connection, conns))
return wrapper |
Helper indirect fixture for creating a test connection. | def mocked_connection(request, create_mock_connection):
"""Helper indirect fixture for creating a test connection."""
return create_mock_connection(request.param) |
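A hedged sketch of the indirect parametrization this fixture is designed for; the URI is a placeholder.
# Hypothetical parametrized test: request.param is routed through create_mock_connection.
@pytest.mark.parametrize("mocked_connection", ["ftp://user:pass@localhost:21"], indirect=True)
def test_conn_type_example(mocked_connection):
    assert mocked_connection.conn_type == "ftp"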
Test that refresh_conn resets _conn and calls get_conn | def test_refresh_conn(hook):
"""Test that refresh_conn resets _conn and calls get_conn"""
with patch.object(hook, "get_conn") as mock_get_conn:
hook.refresh_conn()
assert not hook._conn
assert mock_get_conn.called |
Make a mock Container Group, as the underlying Azure models have read-only attributes.
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups | def make_mock_cg(container_state, events=None):
"""
Make a mock Container Group, as the underlying Azure models have read-only attributes.
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups
"""
events = events or []
instance_view_dict = {"current_state": container_state, "events": events}
instance_view = namedtuple("ContainerPropertiesInstanceView", instance_view_dict.keys())(
*instance_view_dict.values()
)
container_dict = {"instance_view": instance_view}
container = namedtuple("Containers", container_dict.keys())(*container_dict.values())
container_g_dict = {"containers": [container]}
container_g = namedtuple("ContainerGroup", container_g_dict.keys())(*container_g_dict.values())
return container_g |
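A hedged usage sketch; the state string is a placeholder for whatever state object a calling test would build.
# Hypothetical usage: the mock exposes the attribute chain the Azure container hook inspects.
cg = make_mock_cg(container_state="Terminated")
assert cg.containers[0].instance_view.current_state == "Terminated"
assert cg.containers[0].instance_view.events == []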
Make a mock Container Group, as the underlying Azure models have read-only attributes.
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups
This creates the Container Group without events.
This can happen when the container group is provisioned but not started. | def make_mock_cg_with_missing_events(container_state):
"""
Make a mock Container Group, as the underlying Azure models have read-only attributes.
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups
This creates the Container Group without events.
This can happen when the container group is provisioned but not started.
"""
instance_view_dict = {"current_state": container_state, "events": None}
instance_view = namedtuple("ContainerPropertiesInstanceView", instance_view_dict.keys())(
*instance_view_dict.values()
)
container_dict = {"instance_view": instance_view}
container = namedtuple("Containers", container_dict.keys())(*container_dict.values())
container_g_dict = {"containers": [container]}
container_g = namedtuple("ContainerGroup", container_g_dict.keys())(*container_g_dict.values())
return container_g |
Create MongoDB connections which are used for testing purposes. | def mongo_connections():
"""Create MongoDB connections which are used for testing purposes."""
connections = [
Connection(conn_id="mongo_default", conn_type="mongo", host="mongo", port=27017),
Connection(
conn_id="mongo_default_with_srv",
conn_type="mongo",
host="mongo",
port=27017,
extra='{"srv": true}',
),
# Mongo establishes connection during initialization, so we need to have this connection
Connection(conn_id="fake_connection", conn_type="mongo", host="mongo", port=27017),
]
with pytest.MonkeyPatch.context() as mp:
for conn in connections:
mp.setenv(f"AIRFLOW_CONN_{conn.conn_id.upper()}", conn.as_json())
yield |
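A hedged sketch, assuming an Airflow version where JSON-serialized connections in environment variables are supported; the assertions mirror the values defined above.
# Hypothetical usage: with the fixture active, the connection resolves from the environment.
def test_mongo_conn_available_example(mongo_connections):
    conn = Connection.get_connection_from_secrets("mongo_default")
    assert conn.host == "mongo" and conn.port == 27017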
Mock a pyodbc.Row instantiated object.
This object is used in the tests to replace the real pyodbc.Row object.
pyodbc.Row is a C object that can only be created from the C API of pyodbc.
This mock implements the two features used by the hook:
- cursor_description: which returns column names and types
- __iter__: which allows exploding a row instance (*row) | def pyodbc_row_mock():
"""Mock a pyodbc.Row instantiated object.
This object is used in the tests to replace the real pyodbc.Row object.
pyodbc.Row is a C object that can only be created from the C API of pyodbc.
This mock implements the two features used by the hook:
- cursor_description: which returns column names and types
- __iter__: which allows exploding a row instance (*row)
"""
@dataclass
class Row:
key: int
column: str
def __iter__(self):
yield self.key
yield self.column
@property
def cursor_description(self):
return [
("key", int, None, 11, 11, 0, None),
("column", str, None, 256, 256, 0, None),
]
return Row |
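A hedged sketch of how the mock row behaves; the key and column values are placeholders.
# Hypothetical usage: the dataclass unpacks and describes itself like a real pyodbc.Row.
def test_row_shape_example(pyodbc_row_mock):
    row = pyodbc_row_mock(key=1, column="value")
    assert list(row) == [1, "value"]
    assert [description[0] for description in row.cursor_description] == ["key", "column"]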
Mock a pyodbc.Row class which returns True to any isinstance() checks. | def pyodbc_instancecheck():
"""Mock a pyodbc.Row class which returns True to any isinstance() checks."""
class PyodbcRowMeta(type):
def __instancecheck__(self, instance):
return True
class PyodbcRow(metaclass=PyodbcRowMeta):
pass
return PyodbcRow |
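A hedged sketch; any object passes an isinstance check against the returned class, which is why it can stand in for pyodbc.Row.
# Hypothetical usage: typically patched in place of pyodbc.Row so type checks always pass.
def test_always_isinstance_example(pyodbc_instancecheck):
    assert isinstance(("any", "tuple"), pyodbc_instancecheck)
    assert isinstance(object(), pyodbc_instancecheck)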
Sets up the mock listener with side effects to capture try numbers for different task instance events.
:param mock_listener: The mock object for the listener manager.
:param captured_try_numbers: A dictionary to store captured try numbers keyed by event names.
This function iterates through specified event names and sets a side effect on the corresponding
method of the listener manager's hook. The side effect is a nested function that captures the try number
of the task instance when the method is called.
:Example:
captured_try_numbers = {}
mock_listener = Mock()
_setup_mock_listener(mock_listener, captured_try_numbers)
# After running a task, captured_try_numbers will have the try number captured at the moment of
execution for specified methods, e.g. {"running": 1, "success": 2} for on_task_instance_running and
on_task_instance_success methods. | def _setup_mock_listener(mock_listener: mock.Mock, captured_try_numbers: dict[str, int]) -> None:
"""Sets up the mock listener with side effects to capture try numbers for different task instance events.
:param mock_listener: The mock object for the listener manager.
:param captured_try_numbers: A dictionary to store captured try numbers keyed by event names.
This function iterates through specified event names and sets a side effect on the corresponding
method of the listener manager's hook. The side effect is a nested function that captures the try number
of the task instance when the method is called.
:Example:
captured_try_numbers = {}
mock_listener = Mock()
_setup_mock_listener(mock_listener, captured_try_numbers)
# After running a task, captured_try_numbers will have the try number captured at the moment of
execution for specified methods, e.g. {"running": 1, "success": 2} for on_task_instance_running and
on_task_instance_success methods.
"""
def capture_try_number(method_name):
def inner(*args, **kwargs):
captured_try_numbers[method_name] = kwargs["task_instance"].try_number
return inner
for event in ["running", "success", "failed"]:
getattr(
mock_listener.return_value.hook, f"on_task_instance_{event}"
).side_effect = capture_try_number(event) |
Creates a test DAG and a task for a custom test scenario.
:param python_callable: The Python callable to be executed by the PythonOperator.
:param scenario_name: The name of the test scenario, used to uniquely name the DAG and task.
:return: The created DagRun and TaskInstance objects.
This function creates a DAG and a PythonOperator task with the provided python_callable. It generates a unique
run ID and creates a DAG run. This setup is useful for testing different scenarios in Airflow tasks.
:Example:
def sample_callable(**kwargs):
print("Hello World")
dagrun, task_instance = _create_test_dag_and_task(sample_callable, "sample_scenario")
# Use task_instance to simulate running a task in a test. | def _create_test_dag_and_task(python_callable: Callable, scenario_name: str) -> tuple[DagRun, TaskInstance]:
"""Creates a test DAG and a task for a custom test scenario.
:param python_callable: The Python callable to be executed by the PythonOperator.
:param scenario_name: The name of the test scenario, used to uniquely name the DAG and task.
:return: The created DagRun and TaskInstance objects.
This function creates a DAG and a PythonOperator task with the provided python_callable. It generates a unique
run ID and creates a DAG run. This setup is useful for testing different scenarios in Airflow tasks.
:Example:
def sample_callable(**kwargs):
print("Hello World")
dagrun, task_instance = _create_test_dag_and_task(sample_callable, "sample_scenario")
# Use task_instance to simulate running a task in a test.
"""
dag = DAG(
f"test_{scenario_name}",
start_date=dt.datetime(2022, 1, 1),
)
t = PythonOperator(task_id=f"test_task_{scenario_name}", dag=dag, python_callable=python_callable)
run_id = str(uuid.uuid1())
dagrun = dag.create_dagrun(state=State.NONE, run_id=run_id) # type: ignore
task_instance = TaskInstance(t, run_id=run_id)
return dagrun, task_instance |
Creates and configures an OpenLineageListener instance and a mock TaskInstance for testing.
:return: A tuple containing the configured OpenLineageListener and TaskInstance.
This function instantiates an OpenLineageListener, sets up its required properties with mock objects, and
creates a mock TaskInstance with predefined attributes. This setup is commonly used for testing the
interaction between an OpenLineageListener and a TaskInstance in Airflow.
:Example:
listener, task_instance = _create_listener_and_task_instance()
# Now you can use listener and task_instance in your tests to simulate their interaction. | def _create_listener_and_task_instance() -> tuple[OpenLineageListener, TaskInstance]:
"""Creates and configures an OpenLineageListener instance and a mock TaskInstance for testing.
:return: A tuple containing the configured OpenLineageListener and TaskInstance.
This function instantiates an OpenLineageListener, sets up its required properties with mock objects, and
creates a mock TaskInstance with predefined attributes. This setup is commonly used for testing the
interaction between an OpenLineageListener and a TaskInstance in Airflow.
:Example:
listener, task_instance = _create_listener_and_task_instance()
# Now you can use listener and task_instance in your tests to simulate their interaction.
"""
def mock_task_id(dag_id, task_id, execution_date, try_number):
return f"{dag_id}.{task_id}.{execution_date}.{try_number}"
listener = OpenLineageListener()
listener.log = mock.Mock()
listener.extractor_manager = mock.Mock()
metadata = mock.Mock()
metadata.run_facets = {"run_facet": 1}
listener.extractor_manager.extract_metadata.return_value = metadata
adapter = mock.Mock()
adapter.build_dag_run_id.side_effect = lambda x, y: f"{x}.{y}"
adapter.build_task_instance_run_id.side_effect = mock_task_id
adapter.start_task = mock.Mock()
adapter.fail_task = mock.Mock()
adapter.complete_task = mock.Mock()
listener.adapter = adapter
task_instance = TaskInstance(task=mock.Mock())
task_instance.dag_run = DagRun()
task_instance.dag_run.run_id = "dag_run_run_id"
task_instance.dag_run.data_interval_start = None
task_instance.dag_run.data_interval_end = None
task_instance.task = mock.Mock()
task_instance.task.task_id = "task_id"
task_instance.task.dag = mock.Mock()
task_instance.task.dag.dag_id = "dag_id"
task_instance.task.dag.description = "Test DAG Description"
task_instance.task.dag.owner = "Test Owner"
task_instance.dag_id = "dag_id"
task_instance.run_id = "dag_run_run_id"
task_instance.try_number = 1
task_instance.state = State.RUNNING
task_instance.start_date = dt.datetime(2023, 1, 1, 13, 1, 1)
task_instance.end_date = dt.datetime(2023, 1, 3, 13, 1, 1)
task_instance.execution_date = "execution_date"
task_instance.next_method = None # Ensure this is None to reach start_task
return listener, task_instance |
Tests that the 'start_task' method of the OpenLineageAdapter is invoked with the correct arguments.
The test checks that the job name, job description, event time, and other related data are
correctly passed to the adapter. It also verifies that custom facets and Airflow run facets are
correctly retrieved and included in the call. This ensures that all relevant data, including custom
and Airflow-specific metadata, is accurately conveyed to the adapter during the initialization of a task,
reflecting the comprehensive tracking of task execution contexts. | def test_adapter_start_task_is_called_with_proper_arguments(
mock_get_job_name, mock_get_custom_facets, mock_get_airflow_run_facet, mock_disabled
):
"""Tests that the 'start_task' method of the OpenLineageAdapter is invoked with the correct arguments.
The test checks that the job name, job description, event time, and other related data are
correctly passed to the adapter. It also verifies that custom facets and Airflow run facets are
correctly retrieved and included in the call. This ensures that all relevant data, including custom
and Airflow-specific metadata, is accurately conveyed to the adapter during the initialization of a task,
reflecting the comprehensive tracking of task execution contexts.
"""
listener, task_instance = _create_listener_and_task_instance()
mock_get_job_name.return_value = "job_name"
mock_get_custom_facets.return_value = {"custom_facet": 2}
mock_get_airflow_run_facet.return_value = {"airflow_run_facet": 3}
mock_disabled.return_value = False
listener.on_task_instance_running(None, task_instance, None)
listener.adapter.start_task.assert_called_once_with(
run_id="dag_id.task_id.execution_date.1",
job_name="job_name",
job_description="Test DAG Description",
event_time="2023-01-01T13:01:01",
parent_job_name="dag_id",
parent_run_id="dag_id.dag_run_run_id",
code_location=None,
nominal_start_time=None,
nominal_end_time=None,
owners=["Test Owner"],
task=listener.extractor_manager.extract_metadata(),
run_facets={
"custom_facet": 2,
"airflow_run_facet": 3,
},
) |