repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
string (lengths 2-99) | string (lengths 13-225) | string (lengths 0-18.3M) | int64 (0-18.3M) | float64 (0-1.36M) | int64 (0-4.26M) | 1 class (value: "py")
airflow | airflow-main/airflow/providers/amazon/aws/triggers/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/s3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from datetime import datetime
from functools import cached_property
from typing import Any, AsyncIterator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class S3KeyTrigger(BaseTrigger):
"""
S3KeyTrigger is fired as a deferred class with params to run the task in the trigger worker.
:param bucket_name: Name of the S3 bucket. Only needed when ``bucket_key``
is not provided as a full s3:// url.
:param bucket_key: The key being waited on. Supports full s3:// style url
or relative path from root level. When it's specified as a full s3://
url, please leave bucket_name as `None`.
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:param aws_conn_id: reference to the s3 connection
:param poke_interval: polling period in seconds to check for the key
:param should_check_fn: whether the matched S3 objects should be included in the event payload so the caller can run an additional check function
:param hook_params: optional params for the hook
"""
def __init__(
self,
bucket_name: str,
bucket_key: str | list[str],
wildcard_match: bool = False,
aws_conn_id: str = "aws_default",
poke_interval: float = 5.0,
should_check_fn: bool = False,
**hook_params: Any,
):
super().__init__()
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.aws_conn_id = aws_conn_id
self.hook_params = hook_params
self.poke_interval = poke_interval
self.should_check_fn = should_check_fn
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize S3KeyTrigger arguments and classpath."""
return (
"airflow.providers.amazon.aws.triggers.s3.S3KeyTrigger",
{
"bucket_name": self.bucket_name,
"bucket_key": self.bucket_key,
"wildcard_match": self.wildcard_match,
"aws_conn_id": self.aws_conn_id,
"hook_params": self.hook_params,
"poke_interval": self.poke_interval,
"should_check_fn": self.should_check_fn,
},
)
@cached_property
def hook(self) -> S3Hook:
return S3Hook(aws_conn_id=self.aws_conn_id, verify=self.hook_params.get("verify"))
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make an asynchronous connection using S3HookAsync."""
try:
async with self.hook.async_conn as client:
while True:
if await self.hook.check_key_async(
client, self.bucket_name, self.bucket_key, self.wildcard_match
):
if self.should_check_fn:
s3_objects = await self.hook.get_files_async(
client, self.bucket_name, self.bucket_key, self.wildcard_match
)
await asyncio.sleep(self.poke_interval)
yield TriggerEvent({"status": "running", "files": s3_objects})
else:
yield TriggerEvent({"status": "success"})
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
class S3KeysUnchangedTrigger(BaseTrigger):
"""
S3KeysUnchangedTrigger is fired as a deferred class with params to run the task in the trigger worker.
:param bucket_name: Name of the S3 bucket. Only needed when ``bucket_key``
is not provided as a full s3:// url.
:param prefix: The prefix being waited on. Relative path from bucket root level.
:param inactivity_period: The total seconds of inactivity to designate
keys unchanged. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:param min_objects: The minimum number of objects needed for keys unchanged
sensor to be considered valid.
:param inactivity_seconds: number of inactive seconds already accumulated before this poke
:param previous_objects: The set of object ids found during the last poke.
:param allow_delete: Should this sensor consider objects being deleted
:param aws_conn_id: reference to the s3 connection
:param last_activity_time: last modified or last active time
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
:param hook_params: optional params for the hook
"""
def __init__(
self,
bucket_name: str,
prefix: str,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
inactivity_seconds: int = 0,
previous_objects: set[str] | None = None,
allow_delete: bool = True,
aws_conn_id: str = "aws_default",
last_activity_time: datetime | None = None,
verify: bool | str | None = None,
**hook_params: Any,
):
super().__init__()
self.bucket_name = bucket_name
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
if not previous_objects:
previous_objects = set()
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects
self.inactivity_seconds = inactivity_seconds
self.allow_delete = allow_delete
self.aws_conn_id = aws_conn_id
self.last_activity_time = last_activity_time
self.verify = verify
self.polling_period_seconds = 0
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize S3KeysUnchangedTrigger arguments and classpath."""
return (
"airflow.providers.amazon.aws.triggers.s3.S3KeysUnchangedTrigger",
{
"bucket_name": self.bucket_name,
"prefix": self.prefix,
"inactivity_period": self.inactivity_period,
"min_objects": self.min_objects,
"previous_objects": self.previous_objects,
"inactivity_seconds": self.inactivity_seconds,
"allow_delete": self.allow_delete,
"aws_conn_id": self.aws_conn_id,
"last_activity_time": self.last_activity_time,
"hook_params": self.hook_params,
"verify": self.verify,
"polling_period_seconds": self.polling_period_seconds,
},
)
@cached_property
def hook(self) -> S3Hook:
return S3Hook(aws_conn_id=self.aws_conn_id, verify=self.hook_params.get("verify"))
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make an asynchronous connection using S3Hook."""
try:
async with self.hook.async_conn as client:
while True:
result = await self.hook.is_keys_unchanged_async(
client=client,
bucket_name=self.bucket_name,
prefix=self.prefix,
inactivity_period=self.inactivity_period,
min_objects=self.min_objects,
previous_objects=self.previous_objects,
inactivity_seconds=self.inactivity_seconds,
allow_delete=self.allow_delete,
last_activity_time=self.last_activity_time,
)
if result.get("status") == "success" or result.get("status") == "error":
yield TriggerEvent(result)
elif result.get("status") == "pending":
self.previous_objects = result.get("previous_objects", set())
self.last_activity_time = result.get("last_activity_time")
self.inactivity_seconds = result.get("inactivity_seconds", 0)
await asyncio.sleep(self.polling_period_seconds)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
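# --- Illustrative sketch (not part of the provider source) -----------------------------
# Roughly how a deferrable operator hands control to S3KeysUnchangedTrigger: the worker
# slot is released and the triggerer resumes the task via ``method_name`` once the trigger
# yields a TriggerEvent. The ``sensor`` argument, bucket name and prefix are hypothetical.
def _example_defer_to_keys_unchanged(sensor) -> None:
    sensor.defer(
        trigger=S3KeysUnchangedTrigger(
            bucket_name="example-bucket",
            prefix="exports/daily/",
            inactivity_period=600,
        ),
        method_name="execute_complete",  # called with the event payload when it fires
    )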
| 9,085 | 42.89372 | 100 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/batch_client.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client for AWS Batch services.
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from __future__ import annotations
from random import uniform
from time import sleep
from typing import Callable
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher
from airflow.typing_compat import Protocol, runtime_checkable
@runtime_checkable
class BatchProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.BatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: list[str]) -> dict:
"""
Get job descriptions from AWS Batch.
:param jobs: a list of JobId to describe
:return: an API response to describe jobs
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter.
:param waiterName: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:return: a waiter object for the named AWS Batch service
.. note::
AWS Batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client("batch").waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: dict,
parameters: dict,
containerOverrides: dict,
tags: dict,
) -> dict:
"""
Submit a Batch job.
:param jobName: the name for the AWS Batch job
:param jobQueue: the queue name on AWS Batch
:param jobDefinition: the job definition name on AWS Batch
:param arrayProperties: the same parameter that boto3 will receive
:param parameters: the same parameter that boto3 will receive
:param containerOverrides: the same parameter that boto3 will receive
:param tags: the same parameter that boto3 will receive
:return: an API response
"""
...
def terminate_job(self, jobId: str, reason: str) -> dict:
"""
Terminate a Batch job.
:param jobId: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
"""
...
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
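# --- Illustrative sketch (not part of the provider source) -----------------------------
# Because BatchProtocol is decorated with @runtime_checkable, isinstance() only verifies
# that the listed methods exist on the object; it is a structural, not a behavioural,
# check. Assumes boto3 is installed; the region below is an arbitrary placeholder and no
# API request is made just by creating the client.
def _example_batch_protocol_check() -> None:
    import boto3

    client = boto3.client("batch", region_name="us-east-1")
    print(isinstance(client, BatchProtocol))  # expected: True, the client has these methods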
class BatchClientHook(AwsBaseHook):
"""
Interact with AWS Batch.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("batch") <Batch.Client>`.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
.. note::
Several methods use a default random delay to check or poll for job status, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
Using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
To modify the global defaults for the range of jitter allowed when a
random delay is used to check Batch job status, modify these defaults, e.g.:
.. code-block::
BatchClient.DEFAULT_DELAY_MIN = 0
BatchClient.DEFAULT_DELAY_MAX = 5
When explicit delay values are used, a 1 second random jitter is applied to the
delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay). It is
generally recommended that random jitter is added to API requests. A
convenience method is provided for this, e.g. to get a random delay of
10 sec +/- 5 sec: ``delay = BatchClient.add_jitter(10, width=5, minima=0)``
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
MAX_RETRIES = 4200
STATUS_RETRIES = 10
# delays are in seconds
DEFAULT_DELAY_MIN = 1
DEFAULT_DELAY_MAX = 10
FAILURE_STATE = "FAILED"
SUCCESS_STATE = "SUCCEEDED"
RUNNING_STATE = "RUNNING"
INTERMEDIATE_STATES = (
"SUBMITTED",
"PENDING",
"RUNNABLE",
"STARTING",
RUNNING_STATE,
)
COMPUTE_ENVIRONMENT_TERMINAL_STATUS = ("VALID", "DELETED")
COMPUTE_ENVIRONMENT_INTERMEDIATE_STATUS = ("CREATING", "UPDATING", "DELETING")
JOB_QUEUE_TERMINAL_STATUS = ("VALID", "DELETED")
JOB_QUEUE_INTERMEDIATE_STATUS = ("CREATING", "UPDATING", "DELETING")
def __init__(
self, *args, max_retries: int | None = None, status_retries: int | None = None, **kwargs
) -> None:
# https://github.com/python/mypy/issues/6799 hence type: ignore
super().__init__(client_type="batch", *args, **kwargs) # type: ignore
self.max_retries = max_retries or self.MAX_RETRIES
self.status_retries = status_retries or self.STATUS_RETRIES
@property
def client(self) -> BatchProtocol | botocore.client.BaseClient:
"""
An AWS API client for Batch services.
:return: a boto3 'batch' client for the ``.region_name``
"""
return self.conn
def terminate_job(self, job_id: str, reason: str) -> dict:
"""
Terminate a Batch job.
:param job_id: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
"""
response = self.get_conn().terminate_job(jobId=job_id, reason=reason)
self.log.info(response)
return response
def check_job_success(self, job_id: str) -> bool:
"""
Check the final status of the Batch job.
Return True if the job 'SUCCEEDED', else raise an AirflowException.
:param job_id: a Batch job ID
:raises: AirflowException
"""
job = self.get_job_description(job_id)
job_status = job.get("status")
if job_status == self.SUCCESS_STATE:
self.log.info("AWS Batch job (%s) succeeded: %s", job_id, job)
return True
if job_status == self.FAILURE_STATE:
raise AirflowException(f"AWS Batch job ({job_id}) failed: {job}")
if job_status in self.INTERMEDIATE_STATES:
raise AirflowException(f"AWS Batch job ({job_id}) is not complete: {job}")
raise AirflowException(f"AWS Batch job ({job_id}) has unknown status: {job}")
def wait_for_job(
self,
job_id: str,
delay: int | float | None = None,
get_batch_log_fetcher: Callable[[str], AwsTaskLogFetcher | None] | None = None,
) -> None:
"""
Wait for Batch job to complete.
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:param get_batch_log_fetcher: a method that returns a batch_log_fetcher
:raises: AirflowException
"""
self.delay(delay)
self.poll_for_job_running(job_id, delay)
batch_log_fetcher = None
try:
if get_batch_log_fetcher:
batch_log_fetcher = get_batch_log_fetcher(job_id)
if batch_log_fetcher:
batch_log_fetcher.start()
self.poll_for_job_complete(job_id, delay)
finally:
if batch_log_fetcher:
batch_log_fetcher.stop()
batch_log_fetcher.join()
self.log.info("AWS Batch job (%s) has completed", job_id)
def poll_for_job_running(self, job_id: str, delay: int | float | None = None) -> None:
"""
Poll for job running.
The statuses that indicate a job is running or already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'
The completed status options are included for cases where the status
changes too quickly for polling to detect a RUNNING status that moves
quickly from STARTING to RUNNING to completed (often a failure).
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:raises: AirflowException
"""
self.delay(delay)
running_status = [self.RUNNING_STATE, self.SUCCESS_STATE, self.FAILURE_STATE]
self.poll_job_status(job_id, running_status)
def poll_for_job_complete(self, job_id: str, delay: int | float | None = None) -> None:
"""
Poll for job completion.
The statuses that indicate job completion are: 'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'
:param job_id: a Batch job ID
:param delay: a delay before polling for job status
:raises: AirflowException
"""
self.delay(delay)
complete_status = [self.SUCCESS_STATE, self.FAILURE_STATE]
self.poll_job_status(job_id, complete_status)
def poll_job_status(self, job_id: str, match_status: list[str]) -> bool:
"""
Poll for job status using an exponential back-off strategy (with max_retries).
:param job_id: a Batch job ID
:param match_status: a list of job statuses to match; the Batch job statuses are:
'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
:raises: AirflowException
"""
retries = 0
while True:
job = self.get_job_description(job_id)
job_status = job.get("status")
self.log.info(
"AWS Batch job (%s) check status (%s) in %s",
job_id,
job_status,
match_status,
)
if job_status in match_status:
return True
if retries >= self.max_retries:
raise AirflowException(f"AWS Batch job ({job_id}) status checks exceed max_retries")
retries += 1
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.max_retries,
pause,
)
self.delay(pause)
def get_job_description(self, job_id: str) -> dict:
"""
Get job description (using status_retries).
:param job_id: a Batch job ID
:return: an API response for describe jobs
:raises: AirflowException
"""
retries = 0
while True:
try:
response = self.get_conn().describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except botocore.exceptions.ClientError as err:
# Allow it to retry in case of exceeded quota limit of requests to AWS API
if err.response.get("Error", {}).get("Code") != "TooManyRequestsException":
raise
self.log.warning(
"Ignored TooManyRequestsException error, original message: %r. "
"Please consider to setup retries mode in boto3, "
"check Amazon Provider AWS Connection documentation for more details.",
str(err),
)
retries += 1
if retries >= self.status_retries:
raise AirflowException(
f"AWS Batch job ({job_id}) description error: exceeded status_retries "
f"({self.status_retries})"
)
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
@staticmethod
def parse_job_description(job_id: str, response: dict) -> dict:
"""
Parse job description to extract description for job_id.
:param job_id: a Batch job ID
:param response: an API response for describe jobs
:return: an API response to describe job_id
:raises: AirflowException
"""
jobs = response.get("jobs", [])
matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
if len(matching_jobs) != 1:
raise AirflowException(f"AWS Batch job ({job_id}) description error: response: {response}")
return matching_jobs[0]
def get_job_awslogs_info(self, job_id: str) -> dict[str, str] | None:
all_info = self.get_job_all_awslogs_info(job_id)
if not all_info:
return None
if len(all_info) > 1:
self.log.warning(
f"AWS Batch job ({job_id}) has more than one log stream, only returning the first one."
)
return all_info[0]
def get_job_all_awslogs_info(self, job_id: str) -> list[dict[str, str]]:
"""
Parse job description to extract AWS CloudWatch information.
:param job_id: AWS Batch Job ID
"""
job_desc = self.get_job_description(job_id=job_id)
job_node_properties = job_desc.get("nodeProperties", {})
job_container_desc = job_desc.get("container", {})
if job_node_properties:
# one log config per node
log_configs = [
p.get("container", {}).get("logConfiguration", {})
for p in job_node_properties.get("nodeRangeProperties", {})
]
# one stream name per attempt
stream_names = [a.get("container", {}).get("logStreamName") for a in job_desc.get("attempts", [])]
elif job_container_desc:
log_configs = [job_container_desc.get("logConfiguration", {})]
stream_name = job_container_desc.get("logStreamName")
stream_names = [stream_name] if stream_name is not None else []
else:
raise AirflowException(
f"AWS Batch job ({job_id}) is not a supported job type. "
"Supported job types: container, array, multinode."
)
# If the user selected another logDriver than "awslogs", then CloudWatch logging is disabled.
if any([c.get("logDriver", "awslogs") != "awslogs" for c in log_configs]):
self.log.warning(
f"AWS Batch job ({job_id}) uses non-aws log drivers. AWS CloudWatch logging disabled."
)
return []
if not stream_names:
# If this method is called very early after starting the AWS Batch job,
# there is a possibility that the AWS CloudWatch Stream Name would not exist yet.
# This can also happen in case of misconfiguration.
self.log.warning(f"AWS Batch job ({job_id}) doesn't have any AWS CloudWatch Stream.")
return []
# Try to get user-defined log configuration options
log_options = [c.get("options", {}) for c in log_configs]
# cross stream names with options (i.e. attempts X nodes) to generate all log infos
result = []
for stream in stream_names:
for option in log_options:
result.append(
{
"awslogs_stream_name": stream,
# If the user did not specify anything, the default settings are:
# awslogs-group = /aws/batch/job
# awslogs-region = `same as AWS Batch Job region`
"awslogs_group": option.get("awslogs-group", "/aws/batch/job"),
"awslogs_region": option.get("awslogs-region", self.conn_region_name),
}
)
return result
@staticmethod
def add_jitter(delay: int | float, width: int | float = 1, minima: int | float = 0) -> float:
"""
Use delay +/- width for random jitter.
Adding jitter to status polling can help to avoid
AWS Batch API limits for monitoring Batch jobs with
a high concurrency in Airflow tasks.
:param delay: number of seconds to pause;
delay is assumed to be a positive number
:param width: delay +/- width for random jitter;
width is assumed to be a positive number
:param minima: minimum delay allowed;
minima is assumed to be a non-negative number
:return: uniform(delay - width, delay + width) jitter
and it is a non-negative number
"""
delay = abs(delay)
width = abs(width)
minima = abs(minima)
lower = max(minima, delay - width)
upper = delay + width
return uniform(lower, upper)
@staticmethod
def delay(delay: int | float | None = None) -> None:
"""
Pause execution for ``delay`` seconds.
:param delay: a delay to pause execution using ``time.sleep(delay)``;
a small 1 second jitter is applied to the delay.
.. note::
This method uses a default random delay, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
"""
if delay is None:
delay = uniform(BatchClientHook.DEFAULT_DELAY_MIN, BatchClientHook.DEFAULT_DELAY_MAX)
else:
delay = BatchClientHook.add_jitter(delay)
sleep(delay)
@staticmethod
def exponential_delay(tries: int) -> float:
"""
An exponential back-off delay, with random jitter.
There is a maximum interval of 10 minutes (with random jitter between 3 and 10 minutes).
This is used in the :py:meth:`.poll_for_job_status` method.
:param tries: Number of tries
Examples of behavior:
.. code-block:: python
def exp(tries):
max_interval = 600.0 # 10 minutes in seconds
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
print(delay / 3, delay)
for tries in range(10):
exp(tries)
# 0.33 1.0
# 0.45 1.35
# 0.81 2.44
# 1.41 4.23
# 2.25 6.76
# 3.33 10.00
# 4.65 13.95
# 6.21 18.64
# 8.01 24.04
# 10.05 30.15
.. seealso::
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
max_interval = 600.0 # results in 3 to 10 minute delay
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
return uniform(delay / 3, delay)
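# --- Illustrative sketch (not part of the provider source) -----------------------------
# The two static helpers above are pure functions, so their behaviour can be probed
# without any AWS access: add_jitter() keeps the delay within
# [max(minima, delay - width), delay + width], and exponential_delay() grows roughly
# quadratically with the retry count, capped at 10 minutes.
def _example_batch_delay_helpers() -> None:
    for _ in range(5):
        jittered = BatchClientHook.add_jitter(10, width=5, minima=0)
        assert 5 <= jittered <= 15
    for tries in (1, 5, 10):
        pause = BatchClientHook.exponential_delay(tries)
        assert 0 < pause <= 600.0  # never more than the 10 minute cap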
| 21,319 | 34.298013 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/emr.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import warnings
from time import sleep
from typing import Any
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.helpers import prune_dict
class EmrHook(AwsBaseHook):
"""
Interact with Amazon Elastic MapReduce Service (EMR).
Provide thick wrapper around :external+boto3:py:class:`boto3.client("emr") <EMR.Client>`.
:param emr_conn_id: :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`.
This attribute is only necessary when using
the :meth:`airflow.providers.amazon.aws.hooks.emr.EmrHook.create_job_flow`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
conn_name_attr = "emr_conn_id"
default_conn_name = "emr_default"
conn_type = "emr"
hook_name = "Amazon Elastic MapReduce"
def __init__(self, emr_conn_id: str | None = default_conn_name, *args, **kwargs) -> None:
self.emr_conn_id = emr_conn_id
kwargs["client_type"] = "emr"
super().__init__(*args, **kwargs)
def get_cluster_id_by_name(self, emr_cluster_name: str, cluster_states: list[str]) -> str | None:
"""
Fetch the id of the EMR cluster with the given name and (optional) states; return it only if exactly one matching id is found.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.list_clusters`
:param emr_cluster_name: Name of a cluster to find
:param cluster_states: State(s) of cluster to find
:return: id of the EMR cluster
"""
response_iterator = (
self.get_conn().get_paginator("list_clusters").paginate(ClusterStates=cluster_states)
)
matching_clusters = [
cluster
for page in response_iterator
for cluster in page["Clusters"]
if cluster["Name"] == emr_cluster_name
]
if len(matching_clusters) == 1:
cluster_id = matching_clusters[0]["Id"]
self.log.info("Found cluster name = %s id = %s", emr_cluster_name, cluster_id)
return cluster_id
elif len(matching_clusters) > 1:
raise AirflowException(f"More than one cluster found for name {emr_cluster_name}")
else:
self.log.info("No cluster found for name %s", emr_cluster_name)
return None
def create_job_flow(self, job_flow_overrides: dict[str, Any]) -> dict[str, Any]:
"""
Create and start running a new cluster (job flow).
.. seealso::
- :external+boto3:py:meth:`EMR.Client.run_job_flow`
This method uses ``EmrHook.emr_conn_id`` to receive the initial Amazon EMR cluster configuration.
If ``EmrHook.emr_conn_id`` is empty or the connection does not exist, then an empty initial
configuration is used.
:param job_flow_overrides: Is used to overwrite the parameters in the initial Amazon EMR configuration
cluster. The resulting configuration will be used in the
:external+boto3:py:meth:`EMR.Client.run_job_flow`.
.. seealso::
- :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`
- :external+boto3:py:meth:`EMR.Client.run_job_flow`
- `API RunJobFlow <https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html>`_
"""
config = {}
if self.emr_conn_id:
try:
emr_conn = self.get_connection(self.emr_conn_id)
except AirflowNotFoundException:
warnings.warn(
f"Unable to find {self.hook_name} Connection ID {self.emr_conn_id!r}, "
"using an empty initial configuration. If you want to get rid of this warning "
"message please provide a valid `emr_conn_id` or set it to None.",
UserWarning,
stacklevel=2,
)
else:
if emr_conn.conn_type and emr_conn.conn_type != self.conn_type:
warnings.warn(
f"{self.hook_name} Connection expected connection type {self.conn_type!r}, "
f"Connection {self.emr_conn_id!r} has conn_type={emr_conn.conn_type!r}. "
f"This connection might not work correctly.",
UserWarning,
stacklevel=2,
)
config = emr_conn.extra_dejson.copy()
config.update(job_flow_overrides)
response = self.get_conn().run_job_flow(**config)
return response
def add_job_flow_steps(
self,
job_flow_id: str,
steps: list[dict] | str | None = None,
wait_for_completion: bool = False,
waiter_delay: int | None = None,
waiter_max_attempts: int | None = None,
execution_role_arn: str | None = None,
) -> list[str]:
"""
Add new steps to a running cluster.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.add_job_flow_steps`
:param job_flow_id: The id of the job flow to which the steps are being added
:param steps: A list of the steps to be executed by the job flow
:param wait_for_completion: If True, wait for the steps to be completed. Default is False
:param waiter_delay: The amount of time in seconds to wait between attempts. Default is 5
:param waiter_max_attempts: The maximum number of attempts to be made. Default is 100
:param execution_role_arn: The ARN of the runtime role for a step on the cluster.
"""
config = {}
if execution_role_arn:
config["ExecutionRoleArn"] = execution_role_arn
response = self.get_conn().add_job_flow_steps(JobFlowId=job_flow_id, Steps=steps, **config)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Adding steps failed: {response}")
self.log.info("Steps %s added to JobFlow", response["StepIds"])
if wait_for_completion:
waiter = self.get_conn().get_waiter("step_complete")
for step_id in response["StepIds"]:
waiter.wait(
ClusterId=job_flow_id,
StepId=step_id,
WaiterConfig=prune_dict(
{
"Delay": waiter_delay,
"MaxAttempts": waiter_max_attempts,
}
),
)
return response["StepIds"]
def test_connection(self):
"""
Return a failed state for the Amazon Elastic MapReduce Connection test (this connection type cannot be tested).
We need to overwrite this method because this hook is based on
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsGenericHook`,
otherwise it will try to test connection to AWS STS by using the default boto3 credential strategy.
"""
msg = (
f"{self.hook_name!r} Airflow Connection cannot be tested, by design it stores "
f"only key/value pairs and does not make a connection to an external resource."
)
return False, msg
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom UI field behaviour for Amazon Elastic MapReduce Connection."""
return {
"hidden_fields": ["host", "schema", "port", "login", "password"],
"relabeling": {
"extra": "Run Job Flow Configuration",
},
"placeholders": {
"extra": json.dumps(
{
"Name": "MyClusterName",
"ReleaseLabel": "emr-5.36.0",
"Applications": [{"Name": "Spark"}],
"Instances": {
"InstanceGroups": [
{
"Name": "Primary node",
"Market": "SPOT",
"InstanceRole": "MASTER",
"InstanceType": "m5.large",
"InstanceCount": 1,
},
],
"KeepJobFlowAliveWhenNoSteps": False,
"TerminationProtected": False,
},
"StepConcurrencyLevel": 2,
},
indent=2,
),
},
}
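# --- Illustrative sketch (not part of the provider source) -----------------------------
# Minimal use of EmrHook.create_job_flow(): the overrides dict is merged on top of any
# "Run Job Flow Configuration" stored in the EMR connection's extra field and then passed
# to EMR.Client.run_job_flow. The connection ids and cluster settings are hypothetical,
# and running this for real requires valid AWS credentials.
def _example_create_job_flow() -> None:
    hook = EmrHook(aws_conn_id="aws_default", emr_conn_id="emr_default")
    response = hook.create_job_flow({"Name": "ad-hoc-cluster", "StepConcurrencyLevel": 1})
    print(response["JobFlowId"])  # RunJobFlow responses include the new JobFlowId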
class EmrServerlessHook(AwsBaseHook):
"""
Interact with Amazon EMR Serverless.
Provide thin wrapper around :py:class:`boto3.client("emr-serverless") <EMRServerless.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
JOB_INTERMEDIATE_STATES = {"PENDING", "RUNNING", "SCHEDULED", "SUBMITTED"}
JOB_FAILURE_STATES = {"FAILED", "CANCELLING", "CANCELLED"}
JOB_SUCCESS_STATES = {"SUCCESS"}
JOB_TERMINAL_STATES = JOB_SUCCESS_STATES.union(JOB_FAILURE_STATES)
APPLICATION_INTERMEDIATE_STATES = {"CREATING", "STARTING", "STOPPING"}
APPLICATION_FAILURE_STATES = {"STOPPED", "TERMINATED"}
APPLICATION_SUCCESS_STATES = {"CREATED", "STARTED"}
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs["client_type"] = "emr-serverless"
super().__init__(*args, **kwargs)
def cancel_running_jobs(self, application_id: str, waiter_config: dict = {}):
"""
List all jobs in an intermediate state, cancel them, then wait for those jobs to reach terminal state.
Note: if new jobs are triggered while this operation is ongoing,
it's going to time out and return an error.
"""
paginator = self.conn.get_paginator("list_job_runs")
results_per_response = 50
iterator = paginator.paginate(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES),
PaginationConfig={
"PageSize": results_per_response,
},
)
count = 0
for r in iterator:
job_ids = [jr["id"] for jr in r["jobRuns"]]
count += len(job_ids)
if len(job_ids) > 0:
self.log.info(
"Cancelling %s pending job(s) for the application %s so that it can be stopped",
len(job_ids),
application_id,
)
for job_id in job_ids:
self.conn.cancel_job_run(applicationId=application_id, jobRunId=job_id)
if count > 0:
self.log.info("now waiting for the %s cancelled job(s) to terminate", count)
self.get_waiter("no_job_running").wait(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES.union({"CANCELLING"})),
WaiterConfig=waiter_config,
)
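# --- Illustrative sketch (not part of the provider source) -----------------------------
# cancel_running_jobs() accepts a standard botocore waiter configuration; Delay and
# MaxAttempts bound how long we wait for the cancelled jobs to reach a terminal state.
# The application id is a hypothetical placeholder and real AWS credentials are required.
def _example_cancel_serverless_jobs() -> None:
    hook = EmrServerlessHook(aws_conn_id="aws_default")
    hook.cancel_running_jobs(
        application_id="00f1example2345",                 # hypothetical application id
        waiter_config={"Delay": 30, "MaxAttempts": 20},   # at most ~10 minutes of polling
    )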
class EmrContainerHook(AwsBaseHook):
"""
Interact with Amazon EMR Containers (Amazon EMR on EKS).
Provide thick wrapper around :py:class:`boto3.client("emr-containers") <EMRContainers.Client>`.
:param virtual_cluster_id: Cluster ID of the EMR on EKS virtual cluster
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
INTERMEDIATE_STATES = (
"PENDING",
"SUBMITTED",
"RUNNING",
)
FAILURE_STATES = (
"FAILED",
"CANCELLED",
"CANCEL_PENDING",
)
SUCCESS_STATES = ("COMPLETED",)
TERMINAL_STATES = (
"COMPLETED",
"FAILED",
"CANCELLED",
"CANCEL_PENDING",
)
def __init__(self, *args: Any, virtual_cluster_id: str | None = None, **kwargs: Any) -> None:
super().__init__(client_type="emr-containers", *args, **kwargs) # type: ignore
self.virtual_cluster_id = virtual_cluster_id
def create_emr_on_eks_cluster(
self,
virtual_cluster_name: str,
eks_cluster_name: str,
eks_namespace: str,
tags: dict | None = None,
) -> str:
response = self.conn.create_virtual_cluster(
name=virtual_cluster_name,
containerProvider={
"id": eks_cluster_name,
"type": "EKS",
"info": {"eksInfo": {"namespace": eks_namespace}},
},
tags=tags or {},
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Create EMR EKS Cluster failed: {response}")
else:
self.log.info(
"Create EMR EKS Cluster success - virtual cluster id %s",
response["id"],
)
return response["id"]
def submit_job(
self,
name: str,
execution_role_arn: str,
release_label: str,
job_driver: dict,
configuration_overrides: dict | None = None,
client_request_token: str | None = None,
tags: dict | None = None,
) -> str:
"""
Submit a job to the EMR Containers API and return the job ID.
A job run is a unit of work, such as a Spark jar, PySpark script,
or SparkSQL query, that you submit to Amazon EMR on EKS.
.. seealso::
- :external+boto3:py:meth:`EMRContainers.Client.start_job_run`
:param name: The name of the job run.
:param execution_role_arn: The IAM role ARN associated with the job run.
:param release_label: The Amazon EMR release version to use for the job run.
:param job_driver: Job configuration details, e.g. the Spark job parameters.
:param configuration_overrides: The configuration overrides for the job run,
specifically either application configuration or monitoring configuration.
:param client_request_token: The client idempotency token of the job run request.
Use this if you want to specify a unique ID to prevent two jobs from getting started.
:param tags: The tags assigned to job runs.
:return: The ID of the job run request.
"""
params = {
"name": name,
"virtualClusterId": self.virtual_cluster_id,
"executionRoleArn": execution_role_arn,
"releaseLabel": release_label,
"jobDriver": job_driver,
"configurationOverrides": configuration_overrides or {},
"tags": tags or {},
}
if client_request_token:
params["clientToken"] = client_request_token
response = self.conn.start_job_run(**params)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Start Job Run failed: {response}")
else:
self.log.info(
"Start Job Run success - Job Id %s and virtual cluster id %s",
response["id"],
response["virtualClusterId"],
)
return response["id"]
def get_job_failure_reason(self, job_id: str) -> str | None:
"""
Fetch the reason for a job failure (e.g. error message). Returns None or reason string.
.. seealso::
- :external+boto3:py:meth:`EMRContainers.Client.describe_job_run`
:param job_id: The ID of the job run request.
"""
reason = None # We absorb any errors if we can't retrieve the job status
try:
response = self.conn.describe_job_run(
virtualClusterId=self.virtual_cluster_id,
id=job_id,
)
failure_reason = response["jobRun"]["failureReason"]
state_details = response["jobRun"]["stateDetails"]
reason = f"{failure_reason} - {state_details}"
except KeyError:
self.log.error("Could not get status of the EMR on EKS job")
except ClientError as ex:
self.log.error("AWS request failed, check logs for more info: %s", ex)
return reason
def check_query_status(self, job_id: str) -> str | None:
"""
Fetch the status of submitted job run. Returns None or one of valid query states.
.. seealso::
- :external+boto3:py:meth:`EMRContainers.Client.describe_job_run`
:param job_id: The ID of the job run request.
"""
try:
response = self.conn.describe_job_run(
virtualClusterId=self.virtual_cluster_id,
id=job_id,
)
return response["jobRun"]["state"]
except self.conn.exceptions.ResourceNotFoundException:
# If the job is not found, we raise an exception as something fatal has happened.
raise AirflowException(f"Job ID {job_id} not found on Virtual Cluster {self.virtual_cluster_id}")
except ClientError as ex:
# If we receive a generic ClientError, we swallow the exception so that the caller
# can treat it as a transient failure and retry on the next poll.
self.log.error("AWS request failed, check logs for more info: %s", ex)
return None
def poll_query_status(
self,
job_id: str,
poll_interval: int = 30,
max_polling_attempts: int | None = None,
) -> str | None:
"""
Poll the status of submitted job run until query state reaches final state; returns the final state.
:param job_id: The ID of the job run request.
:param poll_interval: Time (in seconds) to wait between calls to check query status on EMR
:param max_polling_attempts: Number of times to poll for query state before function exits
"""
try_number = 1
final_query_state = None # Query state when query reaches final state or max_polling_attempts reached
while True:
query_state = self.check_query_status(job_id)
if query_state is None:
self.log.info("Try %s: Invalid query state. Retrying again", try_number)
elif query_state in self.TERMINAL_STATES:
self.log.info("Try %s: Query execution completed. Final state is %s", try_number, query_state)
final_query_state = query_state
break
else:
self.log.info("Try %s: Query is still in non-terminal state - %s", try_number, query_state)
if (
max_polling_attempts and try_number >= max_polling_attempts
): # Break loop if max_polling_attempts reached
final_query_state = query_state
break
try_number += 1
sleep(poll_interval)
return final_query_state
def stop_query(self, job_id: str) -> dict:
"""
Cancel the submitted job_run.
.. seealso::
- :external+boto3:py:meth:`EMRContainers.Client.cancel_job_run`
:param job_id: The ID of the job run to cancel.
"""
return self.conn.cancel_job_run(
virtualClusterId=self.virtual_cluster_id,
id=job_id,
)
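# --- Illustrative sketch (not part of the provider source) -----------------------------
# End-to-end use of EmrContainerHook as described above: submit a Spark job to an EMR on
# EKS virtual cluster, then poll until it reaches a terminal state. The cluster id, role
# ARN, release label and S3 path are hypothetical placeholders; real AWS credentials are
# required for the underlying API calls.
def _example_emr_on_eks_job() -> None:
    hook = EmrContainerHook(virtual_cluster_id="vc-0example1234567")
    job_id = hook.submit_job(
        name="sample-spark-job",
        execution_role_arn="arn:aws:iam::111122223333:role/example-emr-eks-job-role",
        release_label="emr-6.10.0-latest",
        job_driver={"sparkSubmitJobDriver": {"entryPoint": "s3://example-bucket/scripts/job.py"}},
    )
    final_state = hook.poll_query_status(job_id, poll_interval=60, max_polling_attempts=30)
    print(job_id, final_state)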
| 20,429 | 39.058824 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/glue.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import time
import boto3
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
DEFAULT_LOG_SUFFIX = "output"
ERROR_LOG_SUFFIX = "error"
class GlueJobHook(AwsBaseHook):
"""
Interact with AWS Glue.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("glue") <Glue.Client>`.
:param s3_bucket: S3 bucket where logs and local etl script will be uploaded
:param job_name: unique job name per AWS account
:param desc: job description
:param concurrent_run_limit: The maximum number of concurrent runs allowed for a job
:param script_location: path to etl script on s3
:param retry_limit: Maximum number of times to retry this job if it fails
:param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job
:param region_name: aws region name (example: us-east-1)
:param iam_role_name: AWS IAM Role for Glue Job Execution
:param create_job_kwargs: Extra arguments for Glue Job Creation
:param update_config: Update job configuration on Glue (default: False)
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
class LogContinuationTokens:
"""Used to hold the continuation tokens when reading logs from both streams Glue Jobs write to."""
def __init__(self):
self.output_stream_continuation: str | None = None
self.error_stream_continuation: str | None = None
def __init__(
self,
s3_bucket: str | None = None,
job_name: str | None = None,
desc: str | None = None,
concurrent_run_limit: int = 1,
script_location: str | None = None,
retry_limit: int = 0,
num_of_dpus: int | float | None = None,
iam_role_name: str | None = None,
create_job_kwargs: dict | None = None,
update_config: bool = False,
job_poll_interval: int | float = 6,
*args,
**kwargs,
):
self.job_name = job_name
self.desc = desc
self.concurrent_run_limit = concurrent_run_limit
self.script_location = script_location
self.retry_limit = retry_limit
self.s3_bucket = s3_bucket
self.role_name = iam_role_name
self.s3_glue_logs = "logs/glue-logs/"
self.create_job_kwargs = create_job_kwargs or {}
self.update_config = update_config
self.job_poll_interval = job_poll_interval
worker_type_exists = "WorkerType" in self.create_job_kwargs
num_workers_exists = "NumberOfWorkers" in self.create_job_kwargs
if worker_type_exists and num_workers_exists:
if num_of_dpus is not None:
raise ValueError("Cannot specify num_of_dpus with custom WorkerType")
elif not worker_type_exists and num_workers_exists:
raise ValueError("Need to specify custom WorkerType when specifying NumberOfWorkers")
elif worker_type_exists and not num_workers_exists:
raise ValueError("Need to specify NumberOfWorkers when specifying custom WorkerType")
elif num_of_dpus is None:
self.num_of_dpus: int | float = 10
else:
self.num_of_dpus = num_of_dpus
kwargs["client_type"] = "glue"
super().__init__(*args, **kwargs)
def create_glue_job_config(self) -> dict:
default_command = {
"Name": "glueetl",
"ScriptLocation": self.script_location,
}
command = self.create_job_kwargs.pop("Command", default_command)
execution_role = self.get_iam_execution_role()
config = {
"Name": self.job_name,
"Description": self.desc,
"Role": execution_role["Role"]["Arn"],
"ExecutionProperty": {"MaxConcurrentRuns": self.concurrent_run_limit},
"Command": command,
"MaxRetries": self.retry_limit,
**self.create_job_kwargs,
}
if hasattr(self, "num_of_dpus"):
config["MaxCapacity"] = self.num_of_dpus
if self.s3_bucket is not None:
config["LogUri"] = f"s3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}"
return config
def list_jobs(self) -> list:
"""
Get list of Jobs.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_jobs`
"""
return self.conn.get_jobs()
def get_iam_execution_role(self) -> dict:
"""Get IAM Role for job execution."""
try:
iam_client = self.get_session(region_name=self.region_name).client(
"iam", endpoint_url=self.conn_config.endpoint_url, config=self.config, verify=self.verify
)
glue_execution_role = iam_client.get_role(RoleName=self.role_name)
self.log.info("Iam Role Name: %s", self.role_name)
return glue_execution_role
except Exception as general_error:
self.log.error("Failed to create aws glue job, error: %s", general_error)
raise
def initialize_job(
self,
script_arguments: dict | None = None,
run_kwargs: dict | None = None,
) -> dict[str, str]:
"""
Initialize a connection with AWS Glue and start the job run.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.start_job_run`
"""
script_arguments = script_arguments or {}
run_kwargs = run_kwargs or {}
try:
if self.update_config:
job_name = self.create_or_update_glue_job()
else:
job_name = self.get_or_create_glue_job()
return self.conn.start_job_run(JobName=job_name, Arguments=script_arguments, **run_kwargs)
except Exception as general_error:
self.log.error("Failed to run aws glue job, error: %s", general_error)
raise
def get_job_state(self, job_name: str, run_id: str) -> str:
"""
Get state of the Glue job; the job state can be running, finished, failed, stopped or timeout.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_job_run`
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:return: State of the Glue job
"""
job_run = self.conn.get_job_run(JobName=job_name, RunId=run_id, PredecessorsIncluded=True)
return job_run["JobRun"]["JobRunState"]
async def async_get_job_state(self, job_name: str, run_id: str) -> str:
"""The async version of get_job_state."""
async with self.async_conn as client:
job_run = await client.get_job_run(JobName=job_name, RunId=run_id)
return job_run["JobRun"]["JobRunState"]
def print_job_logs(
self,
job_name: str,
run_id: str,
continuation_tokens: LogContinuationTokens,
):
"""
Prints the latest job logs to the Airflow task log and updates the continuation tokens.
:param continuation_tokens: the tokens where to resume from when reading logs.
The object gets updated with the new tokens by this method.
"""
log_client = boto3.client("logs")
paginator = log_client.get_paginator("filter_log_events")
def display_logs_from(log_group: str, continuation_token: str | None) -> str | None:
"""Internal method to mutualize iteration over the 2 different log streams glue jobs write to."""
fetched_logs = []
next_token = continuation_token
try:
for response in paginator.paginate(
logGroupName=log_group,
logStreamNames=[run_id],
PaginationConfig={"StartingToken": continuation_token},
):
fetched_logs.extend([event["message"] for event in response["events"]])
# if the response is empty there is no nextToken in it
next_token = response.get("nextToken") or next_token
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
# we land here when the log groups/streams don't exist yet
self.log.warning(
"No new Glue driver logs so far.\nIf this persists, check the CloudWatch dashboard "
f"at: https://{self.conn_region_name}.console.aws.amazon.com/cloudwatch/home"
)
else:
raise
if len(fetched_logs):
# Add a tab to indent those logs and distinguish them from airflow logs.
# Log lines returned already contain a newline character at the end.
messages = "\t".join(fetched_logs)
self.log.info("Glue Job Run %s Logs:\n\t%s", log_group, messages)
else:
self.log.info("No new log from the Glue Job in %s", log_group)
return next_token
log_group_prefix = self.conn.get_job_run(JobName=job_name, RunId=run_id)["JobRun"]["LogGroupName"]
log_group_default = f"{log_group_prefix}/{DEFAULT_LOG_SUFFIX}"
log_group_error = f"{log_group_prefix}/{ERROR_LOG_SUFFIX}"
# one would think that the error log group would contain only errors, but it actually contains
# a lot of interesting logs too, so it's valuable to have both
continuation_tokens.output_stream_continuation = display_logs_from(
log_group_default, continuation_tokens.output_stream_continuation
)
continuation_tokens.error_stream_continuation = display_logs_from(
log_group_error, continuation_tokens.error_stream_continuation
)
def job_completion(self, job_name: str, run_id: str, verbose: bool = False) -> dict[str, str]:
"""
Wait until the Glue job with job_name finishes; return the final state if it finishes, or raise an AirflowException otherwise.
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False)
:return: Dict of JobRunState and JobRunId
"""
next_log_tokens = self.LogContinuationTokens()
while True:
job_run_state = self.get_job_state(job_name, run_id)
ret = self._handle_state(job_run_state, job_name, run_id, verbose, next_log_tokens)
if ret:
return ret
else:
time.sleep(self.job_poll_interval)
async def async_job_completion(self, job_name: str, run_id: str, verbose: bool = False) -> dict[str, str]:
"""
Wait until the Glue job with job_name finishes; return the final state if it finishes, or raise an AirflowException otherwise.
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False)
:return: Dict of JobRunState and JobRunId
"""
next_log_tokens = self.LogContinuationTokens()
while True:
job_run_state = await self.async_get_job_state(job_name, run_id)
ret = self._handle_state(job_run_state, job_name, run_id, verbose, next_log_tokens)
if ret:
return ret
else:
await asyncio.sleep(self.job_poll_interval)
def _handle_state(
self,
state: str,
job_name: str,
run_id: str,
verbose: bool,
next_log_tokens: GlueJobHook.LogContinuationTokens,
) -> dict | None:
"""Helper function to process Glue Job state while polling. Used by both sync and async methods."""
failed_states = ["FAILED", "TIMEOUT"]
finished_states = ["SUCCEEDED", "STOPPED"]
if verbose:
self.print_job_logs(
job_name=job_name,
run_id=run_id,
continuation_tokens=next_log_tokens,
)
if state in finished_states:
self.log.info("Exiting Job %s Run State: %s", run_id, state)
return {"JobRunState": state, "JobRunId": run_id}
if state in failed_states:
job_error_message = f"Exiting Job {run_id} Run State: {state}"
self.log.info(job_error_message)
raise AirflowException(job_error_message)
else:
self.log.info(
"Polling for AWS Glue Job %s current run state with status %s",
job_name,
state,
)
return None
def has_job(self, job_name) -> bool:
"""
Checks if the job already exists.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_job`
:param job_name: unique job name per AWS account
:return: Returns True if the job already exists and False if not.
"""
self.log.info("Checking if job already exists: %s", job_name)
try:
self.conn.get_job(JobName=job_name)
return True
except self.conn.exceptions.EntityNotFoundException:
return False
def update_job(self, **job_kwargs) -> bool:
"""
Updates job configurations.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.update_job`
:param job_kwargs: Keyword args that define the configurations used for the job
:return: True if job was updated and false otherwise
"""
job_name = job_kwargs.pop("Name")
current_job = self.conn.get_job(JobName=job_name)["Job"]
update_config = {
key: value for key, value in job_kwargs.items() if current_job.get(key) != job_kwargs[key]
}
if update_config != {}:
self.log.info("Updating job: %s", job_name)
self.conn.update_job(JobName=job_name, JobUpdate=job_kwargs)
self.log.info("Updated configurations: %s", update_config)
return True
else:
return False
def get_or_create_glue_job(self) -> str | None:
"""
Get (or create) and return the Job name.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_job`
:return: Name of the Job
"""
if self.has_job(self.job_name):
return self.job_name
config = self.create_glue_job_config()
self.log.info("Creating job: %s", self.job_name)
self.conn.create_job(**config)
return self.job_name
def create_or_update_glue_job(self) -> str | None:
"""
Creates (or updates) and returns the Job name.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.update_job`
- :external+boto3:py:meth:`Glue.Client.create_job`
:return: Name of the Job
"""
config = self.create_glue_job_config()
if self.has_job(self.job_name):
self.update_job(**config)
else:
self.log.info("Creating job: %s", self.job_name)
self.conn.create_job(**config)
return self.job_name
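# --- Illustrative sketch (not part of the provider source) -----------------------------
# A typical flow with GlueJobHook: create (or reuse) the job definition, start a run with
# script arguments, then block until the run finishes. The bucket, role and job names are
# hypothetical placeholders; real AWS credentials are required.
def _example_run_glue_job() -> None:
    hook = GlueJobHook(
        job_name="example-etl-job",
        s3_bucket="example-bucket",
        script_location="s3://example-bucket/scripts/etl.py",
        iam_role_name="example-glue-role",
        num_of_dpus=2,
    )
    run = hook.initialize_job(script_arguments={"--ENV": "dev"})
    result = hook.job_completion(job_name="example-etl-job", run_id=run["JobRunId"], verbose=True)
    print(result["JobRunState"])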
| 16,313 | 38.501211 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/sagemaker.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import collections
import os
import re
import tarfile
import tempfile
import time
from collections import Counter
from datetime import datetime
from functools import partial
from typing import Any, Callable, Generator, cast
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.utils.tags import format_tags
from airflow.utils import timezone
class LogState:
"""Enum-style class holding all possible states of CloudWatch log streams.
https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.LogState
"""
STARTING = 1
WAIT_IN_PROGRESS = 2
TAILING = 3
JOB_COMPLETE = 4
COMPLETE = 5
# Position is a tuple that includes the last read timestamp and the number of items that were read
# at that time. This is used to figure out which event to start with on the next read.
Position = collections.namedtuple("Position", ["timestamp", "skip"])
def argmin(arr, f: Callable) -> int | None:
"""Given callable ``f``, find index in ``arr`` to minimize ``f(arr[i])``.
None is returned if ``arr`` is empty.
"""
min_value = None
min_idx = None
for idx, item in enumerate(arr):
if item is not None:
if min_value is None or f(item) < min_value:
min_value = f(item)
min_idx = idx
return min_idx
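# Illustrative note (not part of the upstream module): ``argmin`` skips ``None`` entries and
# returns the index of the smallest value under ``f``, e.g.
#   argmin([3, None, 1, 2], lambda x: x)  -> 2
#   argmin([], lambda x: x)               -> None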
def secondary_training_status_changed(current_job_description: dict, prev_job_description: dict) -> bool:
"""Check if training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:return: Whether the secondary status message of a training job changed or not.
"""
current_secondary_status_transitions = current_job_description.get("SecondaryStatusTransitions")
if current_secondary_status_transitions is None or len(current_secondary_status_transitions) == 0:
return False
prev_job_secondary_status_transitions = (
prev_job_description.get("SecondaryStatusTransitions") if prev_job_description is not None else None
)
last_message = (
prev_job_secondary_status_transitions[-1]["StatusMessage"]
if prev_job_secondary_status_transitions is not None
and len(prev_job_secondary_status_transitions) > 0
else ""
)
message = current_job_description["SecondaryStatusTransitions"][-1]["StatusMessage"]
return message != last_message
def secondary_training_status_message(
job_description: dict[str, list[Any]], prev_description: dict | None
) -> str:
"""Format string containing start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:param prev_description: Previous job description from DescribeTrainingJob call
:return: Job status string to be printed.
"""
current_transitions = job_description.get("SecondaryStatusTransitions")
if current_transitions is None or len(current_transitions) == 0:
return ""
prev_transitions_num = 0
if prev_description is not None:
if prev_description.get("SecondaryStatusTransitions") is not None:
prev_transitions_num = len(prev_description["SecondaryStatusTransitions"])
transitions_to_print = (
current_transitions[-1:]
if len(current_transitions) == prev_transitions_num
else current_transitions[prev_transitions_num - len(current_transitions) :]
)
status_strs = []
for transition in transitions_to_print:
message = transition["StatusMessage"]
time_str = timezone.convert_to_utc(cast(datetime, job_description["LastModifiedTime"])).strftime(
"%Y-%m-%d %H:%M:%S"
)
status_strs.append(f"{time_str} {transition['Status']} - {message}")
return "\n".join(status_strs)
class SageMakerHook(AwsBaseHook):
"""Interact with Amazon SageMaker.
Provide thick wrapper around
:external+boto3:py:class:`boto3.client("sagemaker") <SageMaker.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
non_terminal_states = {"InProgress", "Stopping"}
endpoint_non_terminal_states = {"Creating", "Updating", "SystemUpdating", "RollingBack", "Deleting"}
pipeline_non_terminal_states = {"Executing", "Stopping"}
failed_states = {"Failed"}
def __init__(self, *args, **kwargs):
super().__init__(client_type="sagemaker", *args, **kwargs)
self.s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
self.logs_hook = AwsLogsHook(aws_conn_id=self.aws_conn_id)
def tar_and_s3_upload(self, path: str, key: str, bucket: str) -> None:
"""Tar the local file or directory and upload to s3.
:param path: local file or directory
:param key: s3 key
:param bucket: s3 bucket
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode="w:gz", fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
def configure_s3_resources(self, config: dict) -> None:
"""Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
"""
s3_operations = config.pop("S3Operations", None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get("S3CreateBucket", [])
upload_ops = s3_operations.get("S3Upload", [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op["Bucket"])
for op in upload_ops:
if op["Tar"]:
self.tar_and_s3_upload(op["Path"], op["Key"], op["Bucket"])
else:
self.s3_hook.load_file(op["Path"], op["Key"], op["Bucket"])
def check_s3_url(self, s3url: str) -> bool:
"""Check if an S3 URL exists.
:param s3url: S3 url
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(f"The input S3 Bucket {bucket} does not exist ")
if (
key
and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)
and not self.s3_hook.check_for_prefix(prefix=key, bucket_name=bucket, delimiter="/")
):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException(
f"The input S3 Key or Prefix {s3url} does not exist in the Bucket {bucket}"
)
return True
def check_training_config(self, training_config: dict) -> None:
"""Check if a training configuration is valid.
:param training_config: training_config
"""
if "InputDataConfig" in training_config:
for channel in training_config["InputDataConfig"]:
if "S3DataSource" in channel["DataSource"]:
self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
def check_tuning_config(self, tuning_config: dict) -> None:
"""Check if a tuning configuration is valid.
:param tuning_config: tuning_config
"""
for channel in tuning_config["TrainingJobDefinition"]["InputDataConfig"]:
if "S3DataSource" in channel["DataSource"]:
self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
def multi_stream_iter(self, log_group: str, streams: list, positions=None) -> Generator:
"""Iterate over the available events.
        The events come from a set of log streams in a single log group and are
        interleaved so that they are yielded in timestamp order.
:param log_group: The name of the log group.
:param streams: A list of the log stream names. The position of the stream in this list is
the stream number.
:param positions: A list of pairs of (timestamp, skip) which represents the last record
read from each stream.
:return: A tuple of (stream number, cloudwatch log event).
"""
positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
event_iters = [
self.logs_hook.get_log_events(log_group, s, positions[s].timestamp, positions[s].skip)
for s in streams
]
events: list[Any | None] = []
for event_stream in event_iters:
if not event_stream:
events.append(None)
continue
try:
events.append(next(event_stream))
except StopIteration:
events.append(None)
while any(events):
i = argmin(events, lambda x: x["timestamp"] if x else 9999999999) or 0
yield i, events[i]
try:
events[i] = next(event_iters[i])
except StopIteration:
events[i] = None
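    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # ``multi_stream_iter`` merges several CloudWatch streams into a single timestamp-ordered
    # feed; a consumer loop might look like the sketch below, where ``hook`` is an assumed
    # SageMakerHook instance and the stream names are hypothetical.
    #
    #   streams = ["my-job/algo-1-1234567890", "my-job/algo-2-1234567890"]
    #   positions = {s: Position(timestamp=0, skip=0) for s in streams}
    #   for stream_idx, event in hook.multi_stream_iter("/aws/sagemaker/TrainingJobs", streams, positions):
    #       print(stream_idx, event["message"])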
def create_training_job(
self,
config: dict,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Start a model training job.
After training completes, Amazon SageMaker saves the resulting model
artifacts to an Amazon S3 location that you specify.
:param config: the config for training
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(
config["TrainingJobName"],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval,
max_ingestion_time,
)
elif wait_for_completion:
describe_response = self.check_status(
config["TrainingJobName"],
"TrainingJobStatus",
self.describe_training_job,
check_interval,
max_ingestion_time,
)
billable_time = (
describe_response["TrainingEndTime"] - describe_response["TrainingStartTime"]
) * describe_response["ResourceConfig"]["InstanceCount"]
self.log.info("Billable seconds: %d", int(billable_time.total_seconds()) + 1)
return response
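    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # Minimal shape of a create_training_job() call on an assumed SageMakerHook instance; the
    # config is a heavily trimmed, hypothetical boto3 CreateTrainingJob payload (see the boto3
    # docs for the full schema).
    #
    #   config = {
    #       "TrainingJobName": "my-training-job",
    #       "AlgorithmSpecification": {"TrainingImage": image_uri, "TrainingInputMode": "File"},
    #       "RoleArn": "arn:aws:iam::123456789012:role/sagemaker-role",
    #       "InputDataConfig": [...],  # channels pointing at s3:// locations
    #       "OutputDataConfig": {"S3OutputPath": "s3://my-bucket/output/"},
    #       "ResourceConfig": {"InstanceType": "ml.m5.xlarge", "InstanceCount": 1, "VolumeSizeInGB": 30},
    #       "StoppingCondition": {"MaxRuntimeInSeconds": 3600},
    #   }
    #   hook.create_training_job(config, wait_for_completion=True, print_log=True, check_interval=60)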
def create_tuning_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Start a hyperparameter tuning job.
A hyperparameter tuning job finds the best version of a model by running
many training jobs on your dataset using the algorithm you choose and
values for hyperparameters within ranges that you specify. It then
chooses the hyperparameter values that result in a model that performs
the best, as measured by an objective metric that you choose.
:param config: the config for tuning
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(
config["HyperParameterTuningJobName"],
"HyperParameterTuningJobStatus",
self.describe_tuning_job,
check_interval,
max_ingestion_time,
)
return response
def create_transform_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Start a transform job.
A transform job uses a trained model to get inferences on a dataset and
saves these results to an Amazon S3 location that you specify.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_transform_job`
:param config: the config for transform job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to transform job creation
"""
if "S3DataSource" in config["TransformInput"]["DataSource"]:
self.check_s3_url(config["TransformInput"]["DataSource"]["S3DataSource"]["S3Uri"])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(
config["TransformJobName"],
"TransformJobStatus",
self.describe_transform_job,
check_interval,
max_ingestion_time,
)
return response
def create_processing_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Use Amazon SageMaker Processing to analyze data and evaluate models.
With Processing, you can use a simplified, managed experience on
SageMaker to run your data processing workloads, such as feature
engineering, data validation, model evaluation, and model
interpretation.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_processing_job`
:param config: the config for processing job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to transform job creation
"""
response = self.get_conn().create_processing_job(**config)
if wait_for_completion:
self.check_status(
config["ProcessingJobName"],
"ProcessingJobStatus",
self.describe_processing_job,
check_interval,
max_ingestion_time,
)
return response
def create_model(self, config: dict):
"""Create a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For
the primary container, you specify the Docker image that contains
inference code, artifacts (from prior training), and a custom
environment map that the inference code uses when you deploy the model
for predictions.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_model`
:param config: the config for model
:return: A response to model creation
"""
return self.get_conn().create_model(**config)
def create_endpoint_config(self, config: dict):
"""Create an endpoint configuration to deploy models.
In the configuration, you identify one or more models, created using the
CreateModel API, to deploy and the resources that you want Amazon
SageMaker to provision.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_endpoint_config`
- :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_model`
- :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_endpoint`
:param config: the config for endpoint-config
:return: A response to endpoint config creation
"""
return self.get_conn().create_endpoint_config(**config)
def create_endpoint(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Create an endpoint from configuration.
When you create a serverless endpoint, SageMaker provisions and manages
the compute resources for you. Then, you can make inference requests to
the endpoint and receive model predictions in response. SageMaker scales
the compute resources up and down as needed to handle your request traffic.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_endpoint`
            - :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_endpoint_config`
:param config: the config for endpoint
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(
config["EndpointName"],
"EndpointStatus",
self.describe_endpoint,
check_interval,
max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states,
)
return response
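    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # The model -> endpoint-config -> endpoint flow referenced above, on an assumed
    # SageMakerHook instance; names are hypothetical and each dict is the corresponding
    # (trimmed) boto3 payload.
    #
    #   hook.create_model({"ModelName": "my-model", "ExecutionRoleArn": role_arn,
    #                      "PrimaryContainer": {"Image": image_uri, "ModelDataUrl": model_artifact_s3_uri}})
    #   hook.create_endpoint_config({"EndpointConfigName": "my-config", "ProductionVariants": [variant]})
    #   hook.create_endpoint({"EndpointName": "my-endpoint", "EndpointConfigName": "my-config"},
    #                        wait_for_completion=True)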
def update_endpoint(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""Deploy the config in the request and switch to using the new endpoint.
Resources provisioned for the endpoint using the previous EndpointConfig
are deleted (there is no availability loss).
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.update_endpoint`
:param config: the config for endpoint
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to endpoint update
"""
response = self.get_conn().update_endpoint(**config)
if wait_for_completion:
self.check_status(
config["EndpointName"],
"EndpointStatus",
self.describe_endpoint,
check_interval,
max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states,
)
return response
def describe_training_job(self, name: str):
"""Get the training job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_training_job`
:param name: the name of the training job
:return: A dict contains all the training job info
"""
return self.get_conn().describe_training_job(TrainingJobName=name)
def describe_training_job_with_log(
self,
job_name: str,
positions,
stream_names: list,
instance_count: int,
state: int,
last_description: dict,
last_describe_job_call: float,
):
"""Get the associated training job info and print CloudWatch logs."""
log_group = "/aws/sagemaker/TrainingJobs"
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.logs_hook.get_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + "/",
orderBy="LogStreamName",
limit=instance_count,
)
stream_names = [s["logStreamName"] for s in streams["logStreams"]]
positions.update(
[(s, Position(timestamp=0, skip=0)) for s in stream_names if s not in positions]
)
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event["message"])
ts, count = positions[stream_names[idx]]
if event["timestamp"] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event["timestamp"], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.monotonic() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.monotonic()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description["TrainingJobStatus"]
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call
def describe_tuning_job(self, name: str) -> dict:
"""Get the tuning job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_hyper_parameter_tuning_job`
:param name: the name of the tuning job
:return: A dict contains all the tuning job info
"""
return self.get_conn().describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
def describe_model(self, name: str) -> dict:
"""Get the SageMaker model info associated with the name.
:param name: the name of the SageMaker model
:return: A dict contains all the model info
"""
return self.get_conn().describe_model(ModelName=name)
def describe_transform_job(self, name: str) -> dict:
"""Get the transform job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_transform_job`
:param name: the name of the transform job
:return: A dict contains all the transform job info
"""
return self.get_conn().describe_transform_job(TransformJobName=name)
def describe_processing_job(self, name: str) -> dict:
"""Get the processing job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_processing_job`
:param name: the name of the processing job
:return: A dict contains all the processing job info
"""
return self.get_conn().describe_processing_job(ProcessingJobName=name)
def describe_endpoint_config(self, name: str) -> dict:
"""Get the endpoint config info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_endpoint_config`
:param name: the name of the endpoint config
:return: A dict contains all the endpoint config info
"""
return self.get_conn().describe_endpoint_config(EndpointConfigName=name)
def describe_endpoint(self, name: str) -> dict:
"""Get the description of an endpoint.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_endpoint`
:param name: the name of the endpoint
:return: A dict contains all the endpoint info
"""
return self.get_conn().describe_endpoint(EndpointName=name)
def check_status(
self,
job_name: str,
key: str,
describe_function: Callable,
check_interval: int,
max_ingestion_time: int | None = None,
non_terminal_states: set | None = None,
) -> dict:
"""Check status of a SageMaker resource.
        :param job_name: name of the resource to check the status of; it can be a job
            but also, for instance, a pipeline.
:param key: the key of the response dict that points to the state
:param describe_function: the function used to retrieve the status
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker resource
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker resources that run longer than this will fail. Setting
this to None implies no timeout for any SageMaker resource.
:param non_terminal_states: the set of nonterminal states
:return: response of describe call after resource is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
while True:
time.sleep(check_interval)
sec += check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info("Resource still running for %s seconds... current status is %s", sec, status)
except KeyError:
raise AirflowException("Could not get status of the SageMaker resource")
except ClientError:
raise AirflowException("AWS request failed, check logs for more info")
if status in self.failed_states:
raise AirflowException(f"SageMaker resource failed because {response['FailureReason']}")
elif status not in non_terminal_states:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the resource gets killed if the max ingestion time is exceeded
raise AirflowException(f"SageMaker resource took more than {max_ingestion_time} seconds")
self.log.info("SageMaker resource completed")
return response
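    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # check_status() is the generic polling loop behind the create_* helpers above; a direct
    # call on an assumed SageMakerHook instance could look like this (the job name is
    # hypothetical):
    #
    #   hook.check_status(
    #       "my-transform-job",
    #       "TransformJobStatus",
    #       hook.describe_transform_job,
    #       check_interval=30,
    #       max_ingestion_time=3600,
    #   )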
def check_training_status_with_log(
self,
job_name: str,
non_terminal_states: set,
failed_states: set,
wait_for_completion: bool,
check_interval: int,
max_ingestion_time: int | None = None,
):
"""Display logs for a given training job.
Optionally tailing them until the job is complete.
:param job_name: name of the training job to check status and display logs for
:param non_terminal_states: the set of non_terminal states
:param failed_states: the set of failed states
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:param check_interval: The interval in seconds between polling for new log entries and job completion
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description["ResourceConfig"]["InstanceCount"]
status = description["TrainingJobStatus"]
stream_names: list = [] # The list of log streams
positions: dict = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
# reading whatever is available in the logs at this point. Note, that if we were called with
# wait_for_completion == False, we never check the job status.
#
# If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
# If wait_for_completion == FALSE, the initial state is COMPLETE
# (doesn't matter if the job really is complete).
#
# The state table:
#
# STATE ACTIONS CONDITION NEW STATE
# ---------------- ---------------- ----------------- ----------------
# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE
# Else TAILING
# JOB_COMPLETE Read logs, Pause Any COMPLETE
# COMPLETE Read logs, Exit N/A
#
# Notes:
# - The JOB_COMPLETE state forces us to do an extra pause and read any items that
# got to Cloudwatch after the job was marked complete.
last_describe_job_call = time.monotonic()
last_description = description
while True:
time.sleep(check_interval)
sec += check_interval
state, last_description, last_describe_job_call = self.describe_training_job_with_log(
job_name,
positions,
stream_names,
instance_count,
state,
last_description,
last_describe_job_call,
)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException(f"SageMaker job took more than {max_ingestion_time} seconds")
if wait_for_completion:
status = last_description["TrainingJobStatus"]
if status in failed_states:
reason = last_description.get("FailureReason", "(No reason provided)")
raise AirflowException(f"Error training {job_name}: {status} Reason: {reason}")
billable_time = (
last_description["TrainingEndTime"] - last_description["TrainingStartTime"]
) * instance_count
self.log.info("Billable seconds: %d", int(billable_time.total_seconds()) + 1)
def list_training_jobs(
self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> list[dict]:
"""Call boto3's ``list_training_jobs``.
The training job name and max results are configurable via arguments.
Other arguments are not, and should be provided via kwargs. Note that
boto3 expects these in CamelCase, for example:
.. code-block:: python
list_training_jobs(name_contains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_training_jobs`
:param name_contains: (optional) partial name to match
:param max_results: (optional) maximum number of results to return. None returns infinite results
:param kwargs: (optional) kwargs to boto3's list_training_jobs method
:return: results of the list_training_jobs request
"""
config, max_results = self._preprocess_list_request_args(name_contains, max_results, **kwargs)
list_training_jobs_request = partial(self.get_conn().list_training_jobs, **config)
results = self._list_request(
list_training_jobs_request, "TrainingJobSummaries", max_results=max_results
)
return results
def list_transform_jobs(
self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> list[dict]:
"""Call boto3's ``list_transform_jobs``.
The transform job name and max results are configurable via arguments.
Other arguments are not, and should be provided via kwargs. Note that
boto3 expects these in CamelCase, for example:
.. code-block:: python
list_transform_jobs(name_contains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_transform_jobs`
:param name_contains: (optional) partial name to match.
:param max_results: (optional) maximum number of results to return.
None returns infinite results.
:param kwargs: (optional) kwargs to boto3's list_transform_jobs method.
:return: results of the list_transform_jobs request.
"""
config, max_results = self._preprocess_list_request_args(name_contains, max_results, **kwargs)
list_transform_jobs_request = partial(self.get_conn().list_transform_jobs, **config)
results = self._list_request(
list_transform_jobs_request, "TransformJobSummaries", max_results=max_results
)
return results
def list_processing_jobs(self, **kwargs) -> list[dict]:
"""Call boto3's `list_processing_jobs`.
All arguments should be provided via kwargs. Note that boto3 expects
these in CamelCase, for example:
.. code-block:: python
list_processing_jobs(NameContains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_processing_jobs`
        :param kwargs: (optional) kwargs to boto3's list_processing_jobs method
:return: results of the list_processing_jobs request
"""
list_processing_jobs_request = partial(self.get_conn().list_processing_jobs, **kwargs)
results = self._list_request(
list_processing_jobs_request, "ProcessingJobSummaries", max_results=kwargs.get("MaxResults")
)
return results
def _preprocess_list_request_args(
self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> tuple[dict[str, Any], int | None]:
"""Preprocess arguments for boto3's ``list_*`` methods.
        It converts the arguments name_contains and max_results into the boto3-compliant
        CamelCase format. This method also makes sure that these two arguments
        are only set once.
        :param name_contains: (optional) partial name to match
        :param max_results: (optional) maximum number of results to return
:param kwargs: (optional) kwargs to boto3's list_* method
:return: Tuple with config dict to be passed to boto3's list_* method
and max_results parameter
"""
config = {}
if name_contains:
if "NameContains" in kwargs:
raise AirflowException("Either name_contains or NameContains can be provided, not both.")
config["NameContains"] = name_contains
if "MaxResults" in kwargs and kwargs["MaxResults"] is not None:
if max_results:
raise AirflowException("Either max_results or MaxResults can be provided, not both.")
# Unset MaxResults, we'll use the SageMakerHook's internal method for iteratively fetching results
max_results = kwargs["MaxResults"]
del kwargs["MaxResults"]
config.update(kwargs)
return config, max_results
def _list_request(
self, partial_func: Callable, result_key: str, max_results: int | None = None
) -> list[dict]:
"""Process a list request to produce results.
All AWS boto3 ``list_*`` requests return results in batches, and if the
key "NextToken" is contained in the result, there are more results to
fetch. The default AWS batch size is 10, and configurable up to 100.
This function iteratively loads all results (or up to a given maximum).
Each boto3 ``list_*`` function returns the results in a list with a
different name. The key of this structure must be given to iterate over
the results, e.g. "TransformJobSummaries" for ``list_transform_jobs()``.
:param partial_func: boto3 function with arguments
:param result_key: the result key to iterate over
:param max_results: maximum number of results to return (None = infinite)
:return: Results of the list_* request
"""
sagemaker_max_results = 100 # Fixed number set by AWS
results: list[dict] = []
next_token = None
while True:
kwargs = {}
if next_token is not None:
kwargs["NextToken"] = next_token
if max_results is None:
kwargs["MaxResults"] = sagemaker_max_results
else:
kwargs["MaxResults"] = min(max_results - len(results), sagemaker_max_results)
response = partial_func(**kwargs)
self.log.debug("Fetched %s results.", len(response[result_key]))
results.extend(response[result_key])
if "NextToken" not in response or (max_results is not None and len(results) == max_results):
# Return when there are no results left (no NextToken) or when we've reached max_results.
return results
else:
next_token = response["NextToken"]
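    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # The pagination helper above can back any boto3 list_* call; for example, fetching at
    # most 250 training-job summaries whose name contains "xgb" on an assumed SageMakerHook
    # instance (values are hypothetical):
    #
    #   request = partial(hook.get_conn().list_training_jobs, NameContains="xgb")
    #   summaries = hook._list_request(request, "TrainingJobSummaries", max_results=250)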
@staticmethod
def _name_matches_pattern(
processing_job_name: str,
found_name: str,
job_name_suffix: str | None = None,
) -> bool:
pattern = re.compile(f"^{processing_job_name}({job_name_suffix})?$")
return pattern.fullmatch(found_name) is not None
def count_processing_jobs_by_name(
self,
processing_job_name: str,
job_name_suffix: str | None = None,
throttle_retry_delay: int = 2,
retries: int = 3,
) -> int:
"""Get the number of processing jobs found with the provided name prefix.
:param processing_job_name: The prefix to look for.
:param job_name_suffix: The optional suffix which may be appended to deduplicate an existing job name.
:param throttle_retry_delay: Seconds to wait if a ThrottlingException is hit.
:param retries: The max number of times to retry.
:returns: The number of processing jobs that start with the provided prefix.
"""
try:
jobs = self.get_conn().list_processing_jobs(NameContains=processing_job_name)
# We want to make sure the job name starts with the provided name, not just contains it.
matching_jobs = [
job["ProcessingJobName"]
for job in jobs["ProcessingJobSummaries"]
if self._name_matches_pattern(processing_job_name, job["ProcessingJobName"], job_name_suffix)
]
return len(matching_jobs)
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFound":
# No jobs found with that name. This is good, return 0.
return 0
if e.response["Error"]["Code"] == "ThrottlingException" and retries:
# If we hit a ThrottlingException, back off a little and try again.
time.sleep(throttle_retry_delay)
return self.count_processing_jobs_by_name(
processing_job_name, job_name_suffix, throttle_retry_delay * 2, retries - 1
)
raise
def delete_model(self, model_name: str):
"""Delete a SageMaker model.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.delete_model`
:param model_name: name of the model
"""
try:
self.get_conn().delete_model(ModelName=model_name)
except Exception as general_error:
self.log.error("Failed to delete model, error: %s", general_error)
raise
def describe_pipeline_exec(self, pipeline_exec_arn: str, verbose: bool = False):
"""Get info about a SageMaker pipeline execution.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_pipeline_execution`
- :external+boto3:py:meth:`SageMaker.Client.list_pipeline_execution_steps`
:param pipeline_exec_arn: arn of the pipeline execution
:param verbose: Whether to log details about the steps status in the pipeline execution
"""
if verbose:
res = self.conn.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_exec_arn)
count_by_state = Counter(s["StepStatus"] for s in res["PipelineExecutionSteps"])
running_steps = [
s["StepName"] for s in res["PipelineExecutionSteps"] if s["StepStatus"] == "Executing"
]
self.log.info("state of the pipeline steps: %s", count_by_state)
self.log.info("steps currently in progress: %s", running_steps)
return self.conn.describe_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
def start_pipeline(
self,
pipeline_name: str,
display_name: str = "airflow-triggered-execution",
pipeline_params: dict | None = None,
wait_for_completion: bool = False,
check_interval: int = 30,
verbose: bool = True,
) -> str:
"""Start a new execution for a SageMaker pipeline.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.start_pipeline_execution`
:param pipeline_name: Name of the pipeline to start (this is _not_ the ARN).
:param display_name: The name this pipeline execution will have in the UI. Doesn't need to be unique.
:param pipeline_params: Optional parameters for the pipeline.
All parameters supplied need to already be present in the pipeline definition.
:param wait_for_completion: Will only return once the pipeline is complete if true.
:param check_interval: How long to wait between checks for pipeline status when waiting for
completion.
:param verbose: Whether to print steps details when waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
:return: the ARN of the pipeline execution launched.
"""
formatted_params = format_tags(pipeline_params, key_label="Name")
try:
res = self.conn.start_pipeline_execution(
PipelineName=pipeline_name,
PipelineExecutionDisplayName=display_name,
PipelineParameters=formatted_params,
)
except ClientError as ce:
self.log.error("Failed to start pipeline execution, error: %s", ce)
raise
arn = res["PipelineExecutionArn"]
if wait_for_completion:
self.check_status(
arn,
"PipelineExecutionStatus",
lambda p: self.describe_pipeline_exec(p, verbose),
check_interval,
non_terminal_states=self.pipeline_non_terminal_states,
)
return arn
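    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # Starting a pipeline and later stopping it via the returned execution ARN, on an assumed
    # SageMakerHook instance; the pipeline name and parameters are hypothetical.
    #
    #   arn = hook.start_pipeline(
    #       pipeline_name="my-pipeline",
    #       pipeline_params={"ProcessingInstanceType": "ml.m5.xlarge"},
    #       wait_for_completion=False,
    #   )
    #   status = hook.stop_pipeline(pipeline_exec_arn=arn, wait_for_completion=True)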
def stop_pipeline(
self,
pipeline_exec_arn: str,
wait_for_completion: bool = False,
check_interval: int = 10,
verbose: bool = True,
fail_if_not_running: bool = False,
) -> str:
"""Stop SageMaker pipeline execution.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.stop_pipeline_execution`
:param pipeline_exec_arn: Amazon Resource Name (ARN) of the pipeline execution.
It's the ARN of the pipeline itself followed by "/execution/" and an id.
:param wait_for_completion: Whether to wait for the pipeline to reach a final state.
(i.e. either 'Stopped' or 'Failed')
:param check_interval: How long to wait between checks for pipeline status when waiting for
completion.
:param verbose: Whether to print steps details when waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
:param fail_if_not_running: This method will raise an exception if the pipeline we're trying to stop
is not in an "Executing" state when the call is sent (which would mean that the pipeline is
already either stopping or stopped).
Note that setting this to True will raise an error if the pipeline finished successfully before it
was stopped.
:return: Status of the pipeline execution after the operation.
One of 'Executing'|'Stopping'|'Stopped'|'Failed'|'Succeeded'.
"""
retries = 2 # i.e. 3 calls max, 1 initial + 2 retries
while True:
try:
self.conn.stop_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
break
except ClientError as ce:
# this can happen if the pipeline was transitioning between steps at that moment
if ce.response["Error"]["Code"] == "ConflictException" and retries > 0:
retries = retries - 1
self.log.warning(
"Got a conflict exception when trying to stop the pipeline, "
"retrying %s more times. Error was: %s",
retries,
ce,
)
time.sleep(0.3) # error is due to a race condition, so it should be very transient
continue
# we have to rely on the message to catch the right error here, because its type
# (ValidationException) is shared with other kinds of errors (e.g. badly formatted ARN)
if (
not fail_if_not_running
and "Only pipelines with 'Executing' status can be stopped"
in ce.response["Error"]["Message"]
):
self.log.warning("Cannot stop pipeline execution, as it was not running: %s", ce)
else:
self.log.error(ce)
raise
break
res = self.describe_pipeline_exec(pipeline_exec_arn)
if wait_for_completion and res["PipelineExecutionStatus"] in self.pipeline_non_terminal_states:
res = self.check_status(
pipeline_exec_arn,
"PipelineExecutionStatus",
lambda p: self.describe_pipeline_exec(p, verbose),
check_interval,
non_terminal_states=self.pipeline_non_terminal_states,
)
return res["PipelineExecutionStatus"]
def create_model_package_group(self, package_group_name: str, package_group_desc: str = "") -> bool:
"""Create a Model Package Group if it does not already exist.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_model_package_group`
:param package_group_name: Name of the model package group to create if not already present.
:param package_group_desc: Description of the model package group, if it was to be created (optional).
:return: True if the model package group was created, False if it already existed.
"""
try:
res = self.conn.create_model_package_group(
ModelPackageGroupName=package_group_name,
ModelPackageGroupDescription=package_group_desc,
)
self.log.info(
"Created new Model Package Group with name %s (ARN: %s)",
package_group_name,
res["ModelPackageGroupArn"],
)
return True
except ClientError as e:
# ValidationException can also happen if the package group name contains invalid char,
# so we have to look at the error message too
if e.response["Error"]["Code"] == "ValidationException" and e.response["Error"][
"Message"
].startswith("Model Package Group already exists"):
# log msg only so it doesn't look like an error
self.log.info("%s", e.response["Error"]["Message"])
return False
else:
self.log.error("Error when trying to create Model Package Group: %s", e)
raise
def _describe_auto_ml_job(self, job_name: str):
res = self.conn.describe_auto_ml_job(AutoMLJobName=job_name)
self.log.info("%s's current step: %s", job_name, res["AutoMLJobSecondaryStatus"])
return res
def create_auto_ml_job(
self,
job_name: str,
s3_input: str,
target_attribute: str,
s3_output: str,
role_arn: str,
compressed_input: bool = False,
time_limit: int | None = None,
autodeploy_endpoint_name: str | None = None,
extras: dict | None = None,
wait_for_completion: bool = True,
check_interval: int = 30,
) -> dict | None:
"""Create an auto ML job to predict the given column.
        The learning input is based on data provided through S3, and the output
is written to the specified S3 location.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_auto_ml_job`
:param job_name: Name of the job to create, needs to be unique within the account.
:param s3_input: The S3 location (folder or file) where to fetch the data.
By default, it expects csv with headers.
:param target_attribute: The name of the column containing the values to predict.
:param s3_output: The S3 folder where to write the model artifacts. Must be 128 characters or fewer.
:param role_arn: The ARN or the IAM role to use when interacting with S3.
Must have read access to the input, and write access to the output folder.
:param compressed_input: Set to True if the input is gzipped.
:param time_limit: The maximum amount of time in seconds to spend training the model(s).
:param autodeploy_endpoint_name: If specified, the best model will be deployed to an endpoint with
that name. No deployment made otherwise.
:param extras: Use this dictionary to set any variable input variable for job creation that is not
offered through the parameters of this function. The format is described in:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_auto_ml_job
:param wait_for_completion: Whether to wait for the job to finish before returning. Defaults to True.
:param check_interval: Interval in seconds between 2 status checks when waiting for completion.
:returns: Only if waiting for completion, a dictionary detailing the best model. The structure is that
of the "BestCandidate" key in:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.describe_auto_ml_job
"""
input_data = [
{
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": s3_input}},
"TargetAttributeName": target_attribute,
},
]
params_dict = {
"AutoMLJobName": job_name,
"InputDataConfig": input_data,
"OutputDataConfig": {"S3OutputPath": s3_output},
"RoleArn": role_arn,
}
if compressed_input:
input_data[0]["CompressionType"] = "Gzip"
if time_limit:
params_dict.update(
{"AutoMLJobConfig": {"CompletionCriteria": {"MaxAutoMLJobRuntimeInSeconds": time_limit}}}
)
if autodeploy_endpoint_name:
params_dict.update({"ModelDeployConfig": {"EndpointName": autodeploy_endpoint_name}})
if extras:
params_dict.update(extras)
# returns the job ARN, but we don't need it because we access it by its name
self.conn.create_auto_ml_job(**params_dict)
if wait_for_completion:
res = self.check_status(
job_name,
"AutoMLJobStatus",
# cannot pass the function directly because the parameter needs to be named
self._describe_auto_ml_job,
check_interval,
)
if "BestCandidate" in res:
return res["BestCandidate"]
return None
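    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # A hypothetical AutoML run on an assumed SageMakerHook instance, predicting the "churned"
    # column of a CSV dataset and returning the best candidate once the job finishes:
    #
    #   best = hook.create_auto_ml_job(
    #       job_name="churn-automl",
    #       s3_input="s3://my-bucket/input/customers.csv",
    #       target_attribute="churned",
    #       s3_output="s3://my-bucket/automl-output/",
    #       role_arn="arn:aws:iam::123456789012:role/sagemaker-role",
    #       time_limit=3600,
    #   )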
| 56,365 | 42.39184 | 139 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/redshift_cluster.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import warnings
from typing import Any, Sequence
import botocore.exceptions
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseAsyncHook, AwsBaseHook
class RedshiftHook(AwsBaseHook):
"""Interact with Amazon Redshift.
This is a thin wrapper around
:external+boto3:py:class:`boto3.client("redshift") <Redshift.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
template_fields: Sequence[str] = ("cluster_identifier",)
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "redshift"
super().__init__(*args, **kwargs)
def create_cluster(
self,
cluster_identifier: str,
node_type: str,
master_username: str,
master_user_password: str,
params: dict[str, Any],
) -> dict[str, Any]:
"""Create a new cluster with the specified parameters.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.create_cluster`
:param cluster_identifier: A unique identifier for the cluster.
:param node_type: The node type to be provisioned for the cluster.
Valid Values: ``ds2.xlarge``, ``ds2.8xlarge``, ``dc1.large``,
``dc1.8xlarge``, ``dc2.large``, ``dc2.8xlarge``, ``ra3.xlplus``,
``ra3.4xlarge``, and ``ra3.16xlarge``.
:param master_username: The username associated with the admin user account
for the cluster that is being created.
:param master_user_password: password associated with the admin user account
for the cluster that is being created.
:param params: Remaining AWS Create cluster API params.
"""
try:
response = self.get_conn().create_cluster(
ClusterIdentifier=cluster_identifier,
NodeType=node_type,
MasterUsername=master_username,
MasterUserPassword=master_user_password,
**params,
)
return response
except ClientError as e:
raise e
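    # --- Illustrative usage sketch (not part of the upstream hook) ---
    # Creating a small single-node cluster and checking its status with the helpers below;
    # identifiers and credentials are hypothetical placeholders.
    #
    #   hook = RedshiftHook(aws_conn_id="aws_default")
    #   hook.create_cluster(
    #       cluster_identifier="my-cluster",
    #       node_type="dc2.large",
    #       master_username="admin",
    #       master_user_password="********",
    #       params={"ClusterType": "single-node", "DBName": "dev"},
    #   )
    #   status = hook.cluster_status("my-cluster")  # e.g. "creating", later "available"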
# TODO: Wrap create_cluster_snapshot
def cluster_status(self, cluster_identifier: str) -> str:
"""Get status of a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_clusters`
        :param cluster_identifier: unique identifier of a cluster
"""
try:
response = self.get_conn().describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"]
return response[0]["ClusterStatus"] if response else None
except self.get_conn().exceptions.ClusterNotFoundFault:
return "cluster_not_found"
def delete_cluster(
self,
cluster_identifier: str,
skip_final_cluster_snapshot: bool = True,
final_cluster_snapshot_identifier: str | None = None,
):
"""Delete a cluster and optionally create a snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.delete_cluster`
:param cluster_identifier: unique identifier of a cluster
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:param final_cluster_snapshot_identifier: name of final cluster snapshot
"""
final_cluster_snapshot_identifier = final_cluster_snapshot_identifier or ""
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier,
)
return response["Cluster"] if response["Cluster"] else None
def describe_cluster_snapshots(self, cluster_identifier: str) -> list[str] | None:
"""List snapshots for a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_cluster_snapshots`
:param cluster_identifier: unique identifier of a cluster
"""
response = self.get_conn().describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
if "Snapshots" not in response:
return None
snapshots = response["Snapshots"]
snapshots = [snapshot for snapshot in snapshots if snapshot["Status"]]
snapshots.sort(key=lambda x: x["SnapshotCreateTime"], reverse=True)
return snapshots
    def restore_from_cluster_snapshot(
        self, cluster_identifier: str, snapshot_identifier: str
    ) -> dict[str, Any] | None:
"""Restore a cluster from its snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.restore_from_cluster_snapshot`
:param cluster_identifier: unique identifier of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
"""
response = self.get_conn().restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier
)
return response["Cluster"] if response["Cluster"] else None
def create_cluster_snapshot(
self,
snapshot_identifier: str,
cluster_identifier: str,
retention_period: int = -1,
tags: list[Any] | None = None,
    ) -> dict[str, Any] | None:
"""Create a snapshot of a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.create_cluster_snapshot`
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:param cluster_identifier: unique identifier of a cluster
:param retention_period: The number of days that a manual snapshot is retained.
If the value is -1, the manual snapshot is retained indefinitely.
:param tags: A list of tag instances
"""
if tags is None:
tags = []
response = self.get_conn().create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
ManualSnapshotRetentionPeriod=retention_period,
Tags=tags,
)
return response["Snapshot"] if response["Snapshot"] else None
def get_cluster_snapshot_status(self, snapshot_identifier: str):
"""Get Redshift cluster snapshot status.
        If the cluster snapshot is not found, *None* is returned.
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
"""
try:
response = self.get_conn().describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier,
)
snapshot = response.get("Snapshots")[0]
snapshot_status: str = snapshot.get("Status")
return snapshot_status
except self.get_conn().exceptions.ClusterSnapshotNotFoundFault:
return None
class RedshiftAsyncHook(AwsBaseAsyncHook):
"""Interact with AWS Redshift using aiobotocore library."""
def __init__(self, *args, **kwargs):
warnings.warn(
"airflow.providers.amazon.aws.hook.base_aws.RedshiftAsyncHook has been deprecated and "
"will be removed in future",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["client_type"] = "redshift"
super().__init__(*args, **kwargs)
async def cluster_status(self, cluster_identifier: str, delete_operation: bool = False) -> dict[str, Any]:
"""Get the cluster status.
:param cluster_identifier: unique identifier of a cluster
:param delete_operation: whether the method has been called as part of delete cluster operation
"""
async with await self.get_client_async() as client:
try:
response = await client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster_state = (
response["Clusters"][0]["ClusterStatus"] if response and response["Clusters"] else None
)
return {"status": "success", "cluster_state": cluster_state}
except botocore.exceptions.ClientError as error:
if delete_operation and error.response.get("Error", {}).get("Code", "") == "ClusterNotFound":
return {"status": "success", "cluster_state": "cluster_not_found"}
return {"status": "error", "message": str(error)}
async def pause_cluster(self, cluster_identifier: str, poll_interval: float = 5.0) -> dict[str, Any]:
"""Pause the cluster.
:param cluster_identifier: unique identifier of a cluster
:param poll_interval: polling period in seconds to check for the status
"""
try:
async with await self.get_client_async() as client:
response = await client.pause_cluster(ClusterIdentifier=cluster_identifier)
status = response["Cluster"]["ClusterStatus"] if response and response["Cluster"] else None
if status == "pausing":
flag = asyncio.Event()
while True:
expected_response = await asyncio.create_task(
self.get_cluster_status(cluster_identifier, "paused", flag)
)
await asyncio.sleep(poll_interval)
if flag.is_set():
return expected_response
return {"status": "error", "cluster_state": status}
except botocore.exceptions.ClientError as error:
return {"status": "error", "message": str(error)}
async def resume_cluster(
self,
cluster_identifier: str,
polling_period_seconds: float = 5.0,
) -> dict[str, Any]:
"""Resume the cluster.
:param cluster_identifier: unique identifier of a cluster
:param polling_period_seconds: polling period in seconds to check for the status
"""
async with await self.get_client_async() as client:
try:
response = await client.resume_cluster(ClusterIdentifier=cluster_identifier)
status = response["Cluster"]["ClusterStatus"] if response and response["Cluster"] else None
if status == "resuming":
flag = asyncio.Event()
while True:
expected_response = await asyncio.create_task(
self.get_cluster_status(cluster_identifier, "available", flag)
)
await asyncio.sleep(polling_period_seconds)
if flag.is_set():
return expected_response
return {"status": "error", "cluster_state": status}
except botocore.exceptions.ClientError as error:
return {"status": "error", "message": str(error)}
async def get_cluster_status(
self,
cluster_identifier: str,
expected_state: str,
flag: asyncio.Event,
delete_operation: bool = False,
) -> dict[str, Any]:
"""Check for expected Redshift cluster state.
:param cluster_identifier: unique identifier of a cluster
        :param expected_state: expected state of the cluster, e.g. "available", "pausing", "paused"
        :param flag: asyncio event flag that is set once the expected state is reached or an error occurs
:param delete_operation: whether the method has been called as part of delete cluster operation
"""
try:
response = await self.cluster_status(cluster_identifier, delete_operation=delete_operation)
if ("cluster_state" in response and response["cluster_state"] == expected_state) or response[
"status"
] == "error":
flag.set()
return response
except botocore.exceptions.ClientError as error:
flag.set()
return {"status": "error", "message": str(error)}
| 13,169 | 41.899023 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/redshift_sql.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING
import redshift_connector
from redshift_connector import Connection as RedshiftConnection
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.common.sql.hooks.sql import DbApiHook
if TYPE_CHECKING:
from airflow.models.connection import Connection
class RedshiftSQLHook(DbApiHook):
"""Execute statements against Amazon Redshift.
This hook requires the redshift_conn_id connection.
Note: For AWS IAM authentication, use iam in the extra connection parameters
and set it to true. Leave the password field empty. This will use the
"aws_default" connection to get the temporary token unless you override
with aws_conn_id when initializing the hook.
The cluster-identifier is extracted from the beginning of
    the host field, so it is optional. It can, however, be overridden in the extra field.
extras example: ``{"iam":true}``
:param redshift_conn_id: reference to
:ref:`Amazon Redshift connection id<howto/connection:redshift>`
.. note::
get_sqlalchemy_engine() and get_uri() depend on sqlalchemy-amazon-redshift
"""
conn_name_attr = "redshift_conn_id"
default_conn_name = "redshift_default"
conn_type = "redshift"
hook_name = "Amazon Redshift"
supports_autocommit = True
def __init__(self, *args, aws_conn_id: str = "aws_default", **kwargs) -> None:
super().__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
@staticmethod
def get_ui_field_behaviour() -> dict:
"""Custom field behavior."""
return {
"hidden_fields": [],
"relabeling": {"login": "User", "schema": "Database"},
}
@cached_property
def conn(self):
return self.get_connection(self.redshift_conn_id) # type: ignore[attr-defined]
def _get_conn_params(self) -> dict[str, str | int]:
"""Retrieve connection parameters."""
conn = self.conn
conn_params: dict[str, str | int] = {}
if conn.extra_dejson.get("iam", False):
conn.login, conn.password, conn.port = self.get_iam_token(conn)
if conn.login:
conn_params["user"] = conn.login
if conn.password:
conn_params["password"] = conn.password
if conn.host:
conn_params["host"] = conn.host
if conn.port:
conn_params["port"] = conn.port
if conn.schema:
conn_params["database"] = conn.schema
return conn_params
def get_iam_token(self, conn: Connection) -> tuple[str, str, int]:
"""Retrieve a temporary password to connect to Redshift.
        Port is required. If none is provided, the Redshift default port (5439) is used.
"""
port = conn.port or 5439
        # Pull the cluster identifier from the beginning of the Redshift URL
# ex. my-cluster.ccdre4hpd39h.us-east-1.redshift.amazonaws.com returns my-cluster
cluster_identifier = conn.extra_dejson.get("cluster_identifier")
if not cluster_identifier:
if conn.host:
cluster_identifier = conn.host.split(".", 1)[0]
else:
raise AirflowException("Please set cluster_identifier or host in redshift connection.")
redshift_client = AwsBaseHook(aws_conn_id=self.aws_conn_id, client_type="redshift").conn
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html#Redshift.Client.get_cluster_credentials
cluster_creds = redshift_client.get_cluster_credentials(
DbUser=conn.login,
DbName=conn.schema,
ClusterIdentifier=cluster_identifier,
AutoCreate=False,
)
token = cluster_creds["DbPassword"]
login = cluster_creds["DbUser"]
return login, token, port
def get_uri(self) -> str:
"""Overridden to use the Redshift dialect as driver name."""
conn_params = self._get_conn_params()
if "user" in conn_params:
conn_params["username"] = conn_params.pop("user")
# Compatibility: The 'create' factory method was added in SQLAlchemy 1.4
# to replace calling the default URL constructor directly.
create_url = getattr(URL, "create", URL)
return str(create_url(drivername="redshift+redshift_connector", **conn_params))
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""Overridden to pass Redshift-specific arguments."""
conn_kwargs = self.conn.extra_dejson
if engine_kwargs is None:
engine_kwargs = {}
if "connect_args" in engine_kwargs:
engine_kwargs["connect_args"] = {**conn_kwargs, **engine_kwargs["connect_args"]}
else:
engine_kwargs["connect_args"] = conn_kwargs
return create_engine(self.get_uri(), **engine_kwargs)
def get_table_primary_key(self, table: str, schema: str | None = "public") -> list[str] | None:
"""Get the table's primary key.
:param table: Name of the target table
:param schema: Name of the target schema, public by default
:return: Primary key columns list
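        A usage sketch (the table and schema names below are placeholders):
        .. code-block:: python
            hook = RedshiftSQLHook()
            pk_columns = hook.get_table_primary_key(table="my_table", schema="public")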
"""
sql = """
select kcu.column_name
from information_schema.table_constraints tco
join information_schema.key_column_usage kcu
on kcu.constraint_name = tco.constraint_name
and kcu.constraint_schema = tco.constraint_schema
and kcu.constraint_name = tco.constraint_name
where tco.constraint_type = 'PRIMARY KEY'
and kcu.table_schema = %s
and kcu.table_name = %s
"""
pk_columns = [row[0] for row in self.get_records(sql, (schema, table))]
return pk_columns or None
def get_conn(self) -> RedshiftConnection:
"""Get a ``redshift_connector.Connection`` object."""
conn_params = self._get_conn_params()
conn_kwargs_dejson = self.conn.extra_dejson
conn_kwargs: dict = {**conn_params, **conn_kwargs_dejson}
return redshift_connector.connect(**conn_kwargs)
| 7,160 | 39.457627 | 138 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/glue_catalog.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS Glue Catalog Hook."""
from __future__ import annotations
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class GlueCatalogHook(AwsBaseHook):
"""
Interact with AWS Glue Data Catalog.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("glue") <Glue.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `AWS Glue Data Catalog \
<https://docs.aws.amazon.com/glue/latest/dg/components-overview.html#data-catalog-intro>`__
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="glue", *args, **kwargs)
def get_partitions(
self,
database_name: str,
table_name: str,
expression: str = "",
page_size: int | None = None,
max_items: int | None = None,
) -> set[tuple]:
"""
Retrieves the partition values for a table.
.. seealso::
- :external+boto3:py:class:`Glue.Paginator.GetPartitions`
:param database_name: The name of the catalog database where the partitions reside.
:param table_name: The name of the partitions' table.
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:param page_size: pagination size
:param max_items: maximum items to return
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
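        A usage sketch (the database, table and expression below are placeholders):
        .. code-block:: python
            hook = GlueCatalogHook()
            partitions = hook.get_partitions("airflow", "static_babynames_partitioned", "ds='2015-01-01'")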
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("get_partitions")
response = paginator.paginate(
DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config
)
partitions = set()
for page in response:
for partition in page["Partitions"]:
partitions.add(tuple(partition["Values"]))
return partitions
def check_for_partition(self, database_name: str, table_name: str, expression: str) -> bool:
"""
Checks whether a partition exists.
.. code-block:: python
hook = GlueCatalogHook()
t = "static_babynames_partitioned"
hook.check_for_partition("airflow", t, "ds='2015-01-01'")
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table @partition belongs to
        :param expression: Expression that matches the partitions to check for, e.g.: ``a = 'b' AND c = 'd'``
"""
partitions = self.get_partitions(database_name, table_name, expression, max_items=1)
return bool(partitions)
def get_table(self, database_name: str, table_name: str) -> dict:
"""
Get the information of the table.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_table`
.. code-block:: python
hook = GlueCatalogHook()
r = hook.get_table("db", "table_foo")
r["Name"] = "table_foo"
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table
"""
result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name)
return result["Table"]
def get_table_location(self, database_name: str, table_name: str) -> str:
"""
Get the physical location of the table.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_table`
:param database_name: Name of hive database (schema) @table belongs to
:param table_name: Name of hive table
"""
table = self.get_table(database_name, table_name)
return table["StorageDescriptor"]["Location"]
def get_partition(self, database_name: str, table_name: str, partition_values: list[str]) -> dict:
"""
Gets a Partition.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_partition`
.. code-block:: python
hook = GlueCatalogHook()
partition = hook.get_partition("db", "table", ["string"])
partition["Values"]
:param database_name: Database name
:param table_name: Database's Table name
:param partition_values: List of utf-8 strings that define the partition
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartition
:raises: AirflowException
"""
try:
response = self.get_conn().get_partition(
DatabaseName=database_name, TableName=table_name, PartitionValues=partition_values
)
return response["Partition"]
except ClientError as e:
self.log.error("Client error: %s", e)
raise AirflowException("AWS request failed, check logs for more info")
def create_partition(self, database_name: str, table_name: str, partition_input: dict) -> dict:
"""
Creates a new Partition.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_partition`
.. code-block:: python
hook = GlueCatalogHook()
partition_input = {"Values": []}
hook.create_partition(database_name="db", table_name="table", partition_input=partition_input)
:param database_name: Database name
:param table_name: Database's Table name
:param partition_input: Definition of how the partition is created
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-CreatePartition
:raises: AirflowException
"""
try:
return self.get_conn().create_partition(
DatabaseName=database_name, TableName=table_name, PartitionInput=partition_input
)
except ClientError as e:
self.log.error("Client error: %s", e)
raise AirflowException("AWS request failed, check logs for more info")
| 7,616 | 38.061538 | 139 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/athena.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains AWS Athena hook.
.. spelling:word-list::
PageIterator
"""
from __future__ import annotations
import warnings
from typing import Any
from botocore.paginate import PageIterator
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.utils.waiter_with_logging import wait
class AthenaHook(AwsBaseHook):
"""Interact with Amazon Athena.
Provide thick wrapper around
:external+boto3:py:class:`boto3.client("athena") <Athena.Client>`.
:param sleep_time: obsolete, please use the parameter of `poll_query_status` method instead
:param log_query: Whether to log athena query and other execution params
when it's executed. Defaults to *True*.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
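    A minimal usage sketch (the database name and S3 output location are placeholders):
    .. code-block:: python
        hook = AthenaHook()
        query_id = hook.run_query(
            query="SELECT 1",
            query_context={"Database": "my_database"},
            result_configuration={"OutputLocation": "s3://my-bucket/athena-results/"},
        )
        final_state = hook.poll_query_status(query_id)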
"""
INTERMEDIATE_STATES = (
"QUEUED",
"RUNNING",
)
FAILURE_STATES = (
"FAILED",
"CANCELLED",
)
SUCCESS_STATES = ("SUCCEEDED",)
TERMINAL_STATES = (
"SUCCEEDED",
"FAILED",
"CANCELLED",
)
def __init__(
self, *args: Any, sleep_time: int | None = None, log_query: bool = True, **kwargs: Any
) -> None:
super().__init__(client_type="athena", *args, **kwargs) # type: ignore
if sleep_time is not None:
self.sleep_time = sleep_time
warnings.warn(
"The `sleep_time` parameter of the Athena hook is deprecated, "
"please pass this parameter to the poll_query_status method instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
else:
self.sleep_time = 30 # previous default value
self.log_query = log_query
def run_query(
self,
query: str,
query_context: dict[str, str],
result_configuration: dict[str, Any],
client_request_token: str | None = None,
workgroup: str = "primary",
) -> str:
"""Run a Presto query on Athena with provided config.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.start_query_execution`
:param query: Presto query to run.
:param query_context: Context in which query need to be run.
:param result_configuration: Dict with path to store results in and
config related to encryption.
:param client_request_token: Unique token created by user to avoid
multiple executions of same query.
:param workgroup: Athena workgroup name, when not specified, will be
``'primary'``.
:return: Submitted query execution ID.
"""
params = {
"QueryString": query,
"QueryExecutionContext": query_context,
"ResultConfiguration": result_configuration,
"WorkGroup": workgroup,
}
if client_request_token:
params["ClientRequestToken"] = client_request_token
if self.log_query:
self.log.info("Running Query with params: %s", params)
response = self.get_conn().start_query_execution(**params)
query_execution_id = response["QueryExecutionId"]
self.log.info("Query execution id: %s", query_execution_id)
return query_execution_id
def check_query_status(self, query_execution_id: str) -> str | None:
"""Fetch the state of a submitted query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
        :return: One of the valid query states, or *None* if the response is
malformed.
"""
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
state = None
try:
state = response["QueryExecution"]["Status"]["State"]
except Exception:
self.log.exception(
"Exception while getting query state. Query execution id: %s", query_execution_id
)
finally:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
return state
def get_state_change_reason(self, query_execution_id: str) -> str | None:
"""
Fetch the reason for a state change (e.g. error message). Returns None or reason string.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
"""
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
reason = None
try:
reason = response["QueryExecution"]["Status"]["StateChangeReason"]
except Exception:
self.log.exception(
"Exception while getting query state change reason. Query execution id: %s",
query_execution_id,
)
finally:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
return reason
def get_query_results(
self, query_execution_id: str, next_token_id: str | None = None, max_results: int = 1000
) -> dict | None:
"""Fetch submitted query results.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_results`
:param query_execution_id: Id of submitted athena query
:param next_token_id: The token that specifies where to start pagination.
:param max_results: The maximum number of results (rows) to return in this request.
:return: *None* if the query is in intermediate, failed, or cancelled
state. Otherwise a dict of query outputs.
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error("Invalid Query state. Query execution id: %s", query_execution_id)
return None
elif query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error(
'Query is in "%s" state. Cannot fetch results. Query execution id: %s',
query_state,
query_execution_id,
)
return None
result_params = {"QueryExecutionId": query_execution_id, "MaxResults": max_results}
if next_token_id:
result_params["NextToken"] = next_token_id
return self.get_conn().get_query_results(**result_params)
def get_query_results_paginator(
self,
query_execution_id: str,
max_items: int | None = None,
page_size: int | None = None,
starting_token: str | None = None,
) -> PageIterator | None:
"""Fetch submitted Athena query results.
.. seealso::
- :external+boto3:py:class:`Athena.Paginator.GetQueryResults`
:param query_execution_id: Id of submitted athena query
:param max_items: The total number of items to return.
:param page_size: The size of each page.
:param starting_token: A token to specify where to start paginating.
:return: *None* if the query is in intermediate, failed, or cancelled
state. Otherwise a paginator to iterate through pages of results.
        Call :meth:`.build_full_result()` on the returned paginator to get all
results at once.
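        A usage sketch:
        .. code-block:: python
            paginator = hook.get_query_results_paginator(query_execution_id)
            if paginator is not None:
                results = paginator.build_full_result()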
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error("Invalid Query state (null). Query execution id: %s", query_execution_id)
return None
if query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error(
'Query is in "%s" state. Cannot fetch results, Query execution id: %s',
query_state,
query_execution_id,
)
return None
result_params = {
"QueryExecutionId": query_execution_id,
"PaginationConfig": {
"MaxItems": max_items,
"PageSize": page_size,
"StartingToken": starting_token,
},
}
paginator = self.get_conn().get_paginator("get_query_results")
return paginator.paginate(**result_params)
def poll_query_status(
self, query_execution_id: str, max_polling_attempts: int | None = None, sleep_time: int | None = None
) -> str | None:
"""Poll the state of a submitted query until it reaches final state.
:param query_execution_id: ID of submitted athena query
:param max_polling_attempts: Number of times to poll for query state before function exits
:param sleep_time: Time (in seconds) to wait between two consecutive query status checks.
:return: One of the final states
"""
try:
wait(
waiter=self.get_waiter("query_complete"),
waiter_delay=self.sleep_time if sleep_time is None else sleep_time,
waiter_max_attempts=max_polling_attempts or 120,
args={"QueryExecutionId": query_execution_id},
failure_message=f"Error while waiting for query {query_execution_id} to complete",
status_message=f"Query execution id: {query_execution_id}, "
f"Query is still in non-terminal state",
status_args=["QueryExecution.Status.State"],
)
except AirflowException as error:
# this function does not raise errors to keep previous behavior.
self.log.warning(error)
finally:
return self.check_query_status(query_execution_id)
def get_output_location(self, query_execution_id: str) -> str:
"""Get the output location of the query results in S3 URI format.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
"""
output_location = None
if query_execution_id:
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
if response:
try:
output_location = response["QueryExecution"]["ResultConfiguration"]["OutputLocation"]
except KeyError:
self.log.error(
"Error retrieving OutputLocation. Query execution id: %s", query_execution_id
)
raise
            else:
                # A bare ``raise`` here has no active exception; raise an explicit error instead.
                raise AirflowException(f"Empty response for query execution id: {query_execution_id}")
else:
            raise ValueError(f"Invalid query execution id: {query_execution_id}")
return output_location
def stop_query(self, query_execution_id: str) -> dict:
"""Cancel the submitted query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.stop_query_execution`
:param query_execution_id: Id of submitted athena query
"""
self.log.info("Stopping Query with executionId - %s", query_execution_id)
return self.get_conn().stop_query_execution(QueryExecutionId=query_execution_id)
| 12,358 | 39.257329 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/base_aws.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Base AWS Hook.
.. seealso::
For more information on how to use this hook, take a look at the guide:
:ref:`howto/connection:aws`
"""
from __future__ import annotations
import datetime
import inspect
import json
import logging
import os
import uuid
import warnings
from copy import deepcopy
from functools import cached_property, wraps
from os import PathLike
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar, Union
import boto3
import botocore
import botocore.session
import jinja2
import requests
import tenacity
from botocore.client import ClientMeta
from botocore.config import Config
from botocore.credentials import ReadOnlyCredentials
from botocore.waiter import Waiter, WaiterModel
from dateutil.tz import tzlocal
from slugify import slugify
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException,
AirflowNotFoundException,
AirflowProviderDeprecationWarning,
)
from airflow.hooks.base import BaseHook
from airflow.providers.amazon.aws.utils.connection_wrapper import AwsConnectionWrapper
from airflow.providers_manager import ProvidersManager
from airflow.utils.helpers import exactly_one
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
BaseAwsConnection = TypeVar("BaseAwsConnection", bound=Union[boto3.client, boto3.resource])
if TYPE_CHECKING:
from airflow.models.connection import Connection # Avoid circular imports.
class BaseSessionFactory(LoggingMixin):
"""Base AWS Session Factory class.
This handles synchronous and async boto session creation. It can handle most
of the AWS supported authentication methods.
User can also derive from this class to have full control of boto3 session
creation or to support custom federation.
.. note::
Not all features implemented for synchronous sessions are available
for async sessions.
.. seealso::
- :ref:`howto/connection:aws:session-factory`
"""
def __init__(
self,
conn: Connection | AwsConnectionWrapper | None,
region_name: str | None = None,
config: Config | None = None,
) -> None:
super().__init__()
self._conn = conn
self._region_name = region_name
self._config = config
@cached_property
def conn(self) -> AwsConnectionWrapper:
"""Cached AWS Connection Wrapper."""
return AwsConnectionWrapper(
conn=self._conn,
region_name=self._region_name,
botocore_config=self._config,
)
@cached_property
def basic_session(self) -> boto3.session.Session:
"""Cached property with basic boto3.session.Session."""
return self._create_basic_session(session_kwargs=self.conn.session_kwargs)
@property
def extra_config(self) -> dict[str, Any]:
"""AWS Connection extra_config."""
return self.conn.extra_config
@property
def region_name(self) -> str | None:
"""AWS Region Name read-only property."""
return self.conn.region_name
@property
def config(self) -> Config | None:
"""Configuration for botocore client read-only property."""
return self.conn.botocore_config
@property
def role_arn(self) -> str | None:
"""Assume Role ARN from AWS Connection."""
return self.conn.role_arn
def _apply_session_kwargs(self, session):
if self.conn.session_kwargs.get("profile_name", None) is not None:
session.set_config_variable("profile", self.conn.session_kwargs["profile_name"])
if (
self.conn.session_kwargs.get("aws_access_key_id", None)
or self.conn.session_kwargs.get("aws_secret_access_key", None)
or self.conn.session_kwargs.get("aws_session_token", None)
):
session.set_credentials(
access_key=self.conn.session_kwargs.get("aws_access_key_id"),
secret_key=self.conn.session_kwargs.get("aws_secret_access_key"),
token=self.conn.session_kwargs.get("aws_session_token"),
)
if self.conn.session_kwargs.get("region_name", None) is not None:
session.set_config_variable("region", self.conn.session_kwargs["region_name"])
def get_async_session(self):
from aiobotocore.session import get_session as async_get_session
return async_get_session()
def create_session(self, deferrable: bool = False) -> boto3.session.Session:
"""Create boto3 or aiobotocore Session from connection config."""
if not self.conn:
self.log.info(
"No connection ID provided. Fallback on boto3 credential strategy (region_name=%r). "
"See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html",
self.region_name,
)
if deferrable:
session = self.get_async_session()
self._apply_session_kwargs(session)
return session
else:
return boto3.session.Session(region_name=self.region_name)
elif not self.role_arn:
if deferrable:
session = self.get_async_session()
self._apply_session_kwargs(session)
return session
else:
return self.basic_session
# Values stored in ``AwsConnectionWrapper.session_kwargs`` are intended to be used only
# to create the initial boto3 session.
# If the user wants to use the 'assume_role' mechanism then only the 'region_name' needs to be
# provided, otherwise other parameters might conflict with the base botocore session.
# Unfortunately it is not a part of public boto3 API, see source of boto3.session.Session:
# https://boto3.amazonaws.com/v1/documentation/api/latest/_modules/boto3/session.html#Session
# If we provide 'aws_access_key_id' or 'aws_secret_access_key' or 'aws_session_token'
# as part of session kwargs it will use them instead of assumed credentials.
assume_session_kwargs = {}
if self.conn.region_name:
assume_session_kwargs["region_name"] = self.conn.region_name
return self._create_session_with_assume_role(
session_kwargs=assume_session_kwargs, deferrable=deferrable
)
def _create_basic_session(self, session_kwargs: dict[str, Any]) -> boto3.session.Session:
return boto3.session.Session(**session_kwargs)
def _create_session_with_assume_role(
self, session_kwargs: dict[str, Any], deferrable: bool = False
) -> boto3.session.Session:
if self.conn.assume_role_method == "assume_role_with_web_identity":
# Deferred credentials have no initial credentials
credential_fetcher = self._get_web_identity_credential_fetcher()
credentials = botocore.credentials.DeferredRefreshableCredentials(
method="assume-role-with-web-identity",
refresh_using=credential_fetcher.fetch_credentials,
time_fetcher=lambda: datetime.datetime.now(tz=tzlocal()),
)
else:
# Refreshable credentials do have initial credentials
credentials = botocore.credentials.RefreshableCredentials.create_from_metadata(
metadata=self._refresh_credentials(),
refresh_using=self._refresh_credentials,
method="sts-assume-role",
)
if deferrable:
from aiobotocore.session import get_session as async_get_session
session = async_get_session()
else:
session = botocore.session.get_session()
session._credentials = credentials
session.set_config_variable("region", self.basic_session.region_name)
return boto3.session.Session(botocore_session=session, **session_kwargs)
def _refresh_credentials(self) -> dict[str, Any]:
self.log.debug("Refreshing credentials")
assume_role_method = self.conn.assume_role_method
if assume_role_method not in ("assume_role", "assume_role_with_saml"):
raise NotImplementedError(f"assume_role_method={assume_role_method} not expected")
sts_client = self.basic_session.client("sts", config=self.config)
if assume_role_method == "assume_role":
sts_response = self._assume_role(sts_client=sts_client)
else:
sts_response = self._assume_role_with_saml(sts_client=sts_client)
sts_response_http_status = sts_response["ResponseMetadata"]["HTTPStatusCode"]
if sts_response_http_status != 200:
raise RuntimeError(f"sts_response_http_status={sts_response_http_status}")
credentials = sts_response["Credentials"]
expiry_time = credentials.get("Expiration").isoformat()
self.log.debug("New credentials expiry_time: %s", expiry_time)
credentials = {
"access_key": credentials.get("AccessKeyId"),
"secret_key": credentials.get("SecretAccessKey"),
"token": credentials.get("SessionToken"),
"expiry_time": expiry_time,
}
return credentials
def _assume_role(self, sts_client: boto3.client) -> dict:
kw = {
"RoleSessionName": self._strip_invalid_session_name_characters(f"Airflow_{self.conn.conn_id}"),
**self.conn.assume_role_kwargs,
"RoleArn": self.role_arn,
}
return sts_client.assume_role(**kw)
def _assume_role_with_saml(self, sts_client: boto3.client) -> dict[str, Any]:
saml_config = self.extra_config["assume_role_with_saml"]
principal_arn = saml_config["principal_arn"]
idp_auth_method = saml_config["idp_auth_method"]
if idp_auth_method == "http_spegno_auth":
saml_assertion = self._fetch_saml_assertion_using_http_spegno_auth(saml_config)
else:
raise NotImplementedError(
f"idp_auth_method={idp_auth_method} in Connection {self.conn.conn_id} Extra."
'Currently only "http_spegno_auth" is supported, and must be specified.'
)
self.log.debug("Doing sts_client.assume_role_with_saml to role_arn=%s", self.role_arn)
return sts_client.assume_role_with_saml(
RoleArn=self.role_arn,
PrincipalArn=principal_arn,
SAMLAssertion=saml_assertion,
**self.conn.assume_role_kwargs,
)
def _get_idp_response(
self, saml_config: dict[str, Any], auth: requests.auth.AuthBase
) -> requests.models.Response:
idp_url = saml_config["idp_url"]
self.log.debug("idp_url= %s", idp_url)
session = requests.Session()
# Configurable Retry when querying the IDP endpoint
if "idp_request_retry_kwargs" in saml_config:
idp_request_retry_kwargs = saml_config["idp_request_retry_kwargs"]
self.log.info("idp_request_retry_kwargs= %s", idp_request_retry_kwargs)
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
retry_strategy = Retry(**idp_request_retry_kwargs)
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("https://", adapter)
session.mount("http://", adapter)
idp_request_kwargs = {}
if "idp_request_kwargs" in saml_config:
idp_request_kwargs = saml_config["idp_request_kwargs"]
idp_response = session.get(idp_url, auth=auth, **idp_request_kwargs)
idp_response.raise_for_status()
return idp_response
def _fetch_saml_assertion_using_http_spegno_auth(self, saml_config: dict[str, Any]) -> str:
# requests_gssapi will need paramiko > 2.6 since you'll need
# 'gssapi' not 'python-gssapi' from PyPi.
# https://github.com/paramiko/paramiko/pull/1311
import requests_gssapi
from lxml import etree
auth = requests_gssapi.HTTPSPNEGOAuth()
if "mutual_authentication" in saml_config:
mutual_auth = saml_config["mutual_authentication"]
if mutual_auth == "REQUIRED":
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.REQUIRED)
elif mutual_auth == "OPTIONAL":
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.OPTIONAL)
elif mutual_auth == "DISABLED":
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.DISABLED)
else:
raise NotImplementedError(
f"mutual_authentication={mutual_auth} in Connection {self.conn.conn_id} Extra."
'Currently "REQUIRED", "OPTIONAL" and "DISABLED" are supported.'
"(Exclude this setting will default to HTTPSPNEGOAuth() )."
)
# Query the IDP
idp_response = self._get_idp_response(saml_config, auth=auth)
# Assist with debugging. Note: contains sensitive info!
xpath = saml_config["saml_response_xpath"]
log_idp_response = "log_idp_response" in saml_config and saml_config["log_idp_response"]
if log_idp_response:
self.log.warning(
"The IDP response contains sensitive information, but log_idp_response is ON (%s).",
log_idp_response,
)
self.log.debug("idp_response.content= %s", idp_response.content)
self.log.debug("xpath= %s", xpath)
# Extract SAML Assertion from the returned HTML / XML
xml = etree.fromstring(idp_response.content)
saml_assertion = xml.xpath(xpath)
if isinstance(saml_assertion, list):
if len(saml_assertion) == 1:
saml_assertion = saml_assertion[0]
if not saml_assertion:
raise ValueError("Invalid SAML Assertion")
return saml_assertion
def _get_web_identity_credential_fetcher(
self,
) -> botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher:
base_session = self.basic_session._session or botocore.session.get_session()
client_creator = base_session.create_client
federation = str(self.extra_config.get("assume_role_with_web_identity_federation"))
web_identity_token_loader = {
"file": self._get_file_token_loader,
"google": self._get_google_identity_token_loader,
}.get(federation)
if not web_identity_token_loader:
raise AirflowException(f"Unsupported federation: {federation}.")
return botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=client_creator,
web_identity_token_loader=web_identity_token_loader(),
role_arn=self.role_arn,
extra_args=self.conn.assume_role_kwargs,
)
def _get_file_token_loader(self):
from botocore.credentials import FileWebIdentityTokenLoader
token_file = self.extra_config.get("assume_role_with_web_identity_token_file") or os.getenv(
"AWS_WEB_IDENTITY_TOKEN_FILE"
)
return FileWebIdentityTokenLoader(token_file)
def _get_google_identity_token_loader(self):
from google.auth.transport import requests as requests_transport
from airflow.providers.google.common.utils.id_token_credentials import (
get_default_id_token_credentials,
)
audience = self.extra_config.get("assume_role_with_web_identity_federation_audience")
google_id_token_credentials = get_default_id_token_credentials(target_audience=audience)
def web_identity_token_loader():
if not google_id_token_credentials.valid:
request_adapter = requests_transport.Request()
google_id_token_credentials.refresh(request=request_adapter)
return google_id_token_credentials.token
return web_identity_token_loader
def _strip_invalid_session_name_characters(self, role_session_name: str) -> str:
return slugify(role_session_name, regex_pattern=r"[^\w+=,.@-]+")
class AwsGenericHook(BaseHook, Generic[BaseAwsConnection]):
"""Generic class for interact with AWS.
This class provide a thin wrapper around the boto3 Python library.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param client_type: Reference to :external:py:meth:`boto3.client service_name \
<boto3.session.Session.client>`, e.g. 'emr', 'batch', 's3', etc.
Mutually exclusive with ``resource_type``.
:param resource_type: Reference to :external:py:meth:`boto3.resource service_name \
<boto3.session.Session.resource>`, e.g. 's3', 'ec2', 'dynamodb', etc.
Mutually exclusive with ``client_type``.
:param config: Configuration for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
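    A minimal usage sketch (the connection id and service name below are placeholders):
    .. code-block:: python
        hook = AwsGenericHook(aws_conn_id="aws_default", client_type="s3")
        s3_client = hook.conn  # cached boto3 client for the configured region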
"""
conn_name_attr = "aws_conn_id"
default_conn_name = "aws_default"
conn_type = "aws"
hook_name = "Amazon Web Services"
def __init__(
self,
aws_conn_id: str | None = default_conn_name,
verify: bool | str | None = None,
region_name: str | None = None,
client_type: str | None = None,
resource_type: str | None = None,
config: Config | None = None,
) -> None:
super().__init__()
self.aws_conn_id = aws_conn_id
self.client_type = client_type
self.resource_type = resource_type
self._region_name = region_name
self._config = config
self._verify = verify
@classmethod
def _get_provider_version(cls) -> str:
"""Checks the Providers Manager for the package version."""
try:
manager = ProvidersManager()
hook = manager.hooks[cls.conn_type]
if not hook:
# This gets caught immediately, but without it MyPy complains
# Item "None" of "Optional[HookInfo]" has no attribute "package_name"
# on the following line and static checks fail.
raise ValueError(f"Hook info for {cls.conn_type} not found in the Provider Manager.")
provider = manager.providers[hook.package_name]
return provider.version
except Exception:
# Under no condition should an error here ever cause an issue for the user.
return "Unknown"
@staticmethod
def _find_class_name(target_function_name: str) -> str:
"""Given a frame off the stack, return the name of the class that made the call.
This method may raise a ValueError or an IndexError. The caller is
responsible with catching and handling those.
"""
stack = inspect.stack()
# Find the index of the most recent frame which called the provided function name.
target_frame_index = [frame.function for frame in stack].index(target_function_name)
# Pull that frame off the stack.
target_frame = stack[target_frame_index][0]
# Get the local variables for that frame.
frame_variables = target_frame.f_locals["self"]
# Get the class object for that frame.
frame_class_object = frame_variables.__class__
# Return the name of the class object.
return frame_class_object.__name__
def _get_caller(self, target_function_name: str = "execute") -> str:
"""Given a function name, walk the stack and return the name of the class which called it last."""
try:
caller = self._find_class_name(target_function_name)
if caller == "BaseSensorOperator":
# If the result is a BaseSensorOperator, then look for whatever last called "poke".
return self._get_caller("poke")
return caller
except Exception:
# Under no condition should an error here ever cause an issue for the user.
return "Unknown"
@staticmethod
def _generate_dag_key() -> str:
"""Generate a DAG key.
The Object Identifier (OID) namespace is used to salt the dag_id value.
That salted value is used to generate a SHA-1 hash which, by definition,
can not (reasonably) be reversed. No personal data can be inferred or
extracted from the resulting UUID.
"""
try:
dag_id = os.environ["AIRFLOW_CTX_DAG_ID"]
return str(uuid.uuid5(uuid.NAMESPACE_OID, dag_id))
except Exception:
# Under no condition should an error here ever cause an issue for the user.
return "00000000-0000-0000-0000-000000000000"
@staticmethod
def _get_airflow_version() -> str:
"""Fetch and return the current Airflow version."""
try:
# This can be a circular import under specific configurations.
# Importing locally to either avoid or catch it if it does happen.
from airflow import __version__ as airflow_version
return airflow_version
except Exception:
# Under no condition should an error here ever cause an issue for the user.
return "Unknown"
def _generate_user_agent_extra_field(self, existing_user_agent_extra: str) -> str:
user_agent_extra_values = [
f"Airflow/{self._get_airflow_version()}",
f"AmPP/{self._get_provider_version()}",
f"Caller/{self._get_caller()}",
f"DagRunKey/{self._generate_dag_key()}",
existing_user_agent_extra or "",
]
return " ".join(user_agent_extra_values).strip()
@cached_property
def conn_config(self) -> AwsConnectionWrapper:
"""Get the Airflow Connection object and wrap it in helper (cached)."""
connection = None
if self.aws_conn_id:
try:
connection = self.get_connection(self.aws_conn_id)
except AirflowNotFoundException:
self.log.warning(
"Unable to find AWS Connection ID '%s', switching to empty.", self.aws_conn_id
)
return AwsConnectionWrapper(
conn=connection, region_name=self._region_name, botocore_config=self._config, verify=self._verify
)
@property
def service_config(self) -> dict:
service_name = self.client_type or self.resource_type
return self.conn_config.get_service_config(service_name)
@property
def region_name(self) -> str | None:
"""AWS Region Name read-only property."""
return self.conn_config.region_name
@property
def config(self) -> Config:
"""Configuration for botocore client read-only property."""
return self.conn_config.botocore_config or botocore.config.Config()
@property
def verify(self) -> bool | str | None:
"""Verify or not SSL certificates boto3 client/resource read-only property."""
return self.conn_config.verify
def get_session(self, region_name: str | None = None, deferrable: bool = False) -> boto3.session.Session:
"""Get the underlying boto3.session.Session(region_name=region_name)."""
return SessionFactory(
conn=self.conn_config, region_name=region_name, config=self.config
).create_session(deferrable=deferrable)
def _get_config(self, config: Config | None = None) -> Config:
"""
No AWS Operators use the config argument to this method.
Keep backward compatibility with other users who might use it.
"""
if config is None:
config = deepcopy(self.config)
# ignore[union-attr] is required for this block to appease MyPy
# because the user_agent_extra field is generated at runtime.
user_agent_config = Config(
user_agent_extra=self._generate_user_agent_extra_field(
existing_user_agent_extra=config.user_agent_extra # type: ignore[union-attr]
)
)
return config.merge(user_agent_config) # type: ignore[union-attr]
def get_client_type(
self,
region_name: str | None = None,
config: Config | None = None,
deferrable: bool = False,
) -> boto3.client:
"""Get the underlying boto3 client using boto3 session."""
client_type = self.client_type
session = self.get_session(region_name=region_name, deferrable=deferrable)
if not isinstance(session, boto3.session.Session):
return session.create_client(
client_type,
endpoint_url=self.conn_config.endpoint_url,
config=self._get_config(config),
verify=self.verify,
)
return session.client(
client_type,
endpoint_url=self.conn_config.endpoint_url,
config=self._get_config(config),
verify=self.verify,
)
def get_resource_type(
self,
region_name: str | None = None,
config: Config | None = None,
) -> boto3.resource:
"""Get the underlying boto3 resource using boto3 session."""
resource_type = self.resource_type
session = self.get_session(region_name=region_name)
return session.resource(
resource_type,
endpoint_url=self.conn_config.endpoint_url,
config=self._get_config(config),
verify=self.verify,
)
@cached_property
def conn(self) -> BaseAwsConnection:
"""
Get the underlying boto3 client/resource (cached).
:return: boto3.client or boto3.resource
"""
if not exactly_one(self.client_type, self.resource_type):
raise ValueError(
f"Either client_type={self.client_type!r} or "
f"resource_type={self.resource_type!r} must be provided, not both."
)
elif self.client_type:
return self.get_client_type(region_name=self.region_name)
else:
return self.get_resource_type(region_name=self.region_name)
@property
def async_conn(self):
"""Get an aiobotocore client to use for async operations."""
if not self.client_type:
raise ValueError("client_type must be specified.")
return self.get_client_type(region_name=self.region_name, deferrable=True)
@cached_property
def conn_client_meta(self) -> ClientMeta:
"""Get botocore client metadata from Hook connection (cached)."""
conn = self.conn
if isinstance(conn, botocore.client.BaseClient):
return conn.meta
return conn.meta.client.meta
@property
def conn_region_name(self) -> str:
"""Get actual AWS Region Name from Hook connection (cached)."""
return self.conn_client_meta.region_name
@property
def conn_partition(self) -> str:
"""Get associated AWS Region Partition from Hook connection (cached)."""
return self.conn_client_meta.partition
def get_conn(self) -> BaseAwsConnection:
"""
Get the underlying boto3 client/resource (cached).
Implemented so that caching works as intended. It exists for compatibility
with subclasses that rely on a super().get_conn() method.
:return: boto3.client or boto3.resource
"""
# Compat shim
return self.conn
def get_credentials(self, region_name: str | None = None) -> ReadOnlyCredentials:
"""
Get the underlying `botocore.Credentials` object.
This contains the following authentication attributes: access_key, secret_key and token.
        When this method is used, the secret_key and token are also masked in task logs.
"""
# Credentials are refreshable, so accessing your access key and
# secret key separately can lead to a race condition.
# See https://stackoverflow.com/a/36291428/8283373
creds = self.get_session(region_name=region_name).get_credentials().get_frozen_credentials()
mask_secret(creds.secret_key)
if creds.token:
mask_secret(creds.token)
return creds
def expand_role(self, role: str, region_name: str | None = None) -> str:
"""Get the Amazon Resource Name (ARN) for the role.
If IAM role is already an IAM role ARN, the value is returned unchanged.
:param role: IAM role name or ARN
:param region_name: Optional region name to get credentials for
:return: IAM role ARN
"""
if "/" in role:
return role
else:
session = self.get_session(region_name=region_name)
_client = session.client(
"iam", endpoint_url=self.conn_config.endpoint_url, config=self.config, verify=self.verify
)
return _client.get_role(RoleName=role)["Role"]["Arn"]
@staticmethod
def retry(should_retry: Callable[[Exception], bool]):
"""Repeat requests in response to exceeding a temporary quote limit."""
def retry_decorator(fun: Callable):
@wraps(fun)
def decorator_f(self, *args, **kwargs):
retry_args = getattr(self, "retry_args", None)
if retry_args is None:
return fun(self, *args, **kwargs)
multiplier = retry_args.get("multiplier", 1)
min_limit = retry_args.get("min", 1)
max_limit = retry_args.get("max", 1)
stop_after_delay = retry_args.get("stop_after_delay", 10)
tenacity_before_logger = tenacity.before_log(self.log, logging.INFO) if self.log else None
tenacity_after_logger = tenacity.after_log(self.log, logging.INFO) if self.log else None
default_kwargs = {
"wait": tenacity.wait_exponential(multiplier=multiplier, max=max_limit, min=min_limit),
"retry": tenacity.retry_if_exception(should_retry),
"stop": tenacity.stop_after_delay(stop_after_delay),
"before": tenacity_before_logger,
"after": tenacity_after_logger,
}
return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)
return decorator_f
return retry_decorator
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom UI field behaviour for AWS Connection."""
return {
"hidden_fields": ["host", "schema", "port"],
"relabeling": {
"login": "AWS Access Key ID",
"password": "AWS Secret Access Key",
},
"placeholders": {
"login": "AKIAIOSFODNN7EXAMPLE",
"password": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
"extra": json.dumps(
{
"region_name": "us-east-1",
"session_kwargs": {"profile_name": "default"},
"config_kwargs": {"retries": {"mode": "standard", "max_attempts": 10}},
"role_arn": "arn:aws:iam::123456789098:role/role-name",
"assume_role_method": "assume_role",
"assume_role_kwargs": {"RoleSessionName": "airflow"},
"aws_session_token": "AQoDYXdzEJr...EXAMPLETOKEN",
"endpoint_url": "http://localhost:4566",
},
indent=2,
),
},
}
def test_connection(self):
"""Test the AWS connection by call AWS STS (Security Token Service) GetCallerIdentity API.
.. seealso::
https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html
"""
try:
session = self.get_session()
conn_info = session.client("sts").get_caller_identity()
metadata = conn_info.pop("ResponseMetadata", {})
if metadata.get("HTTPStatusCode") != 200:
try:
return False, json.dumps(metadata)
except TypeError:
return False, str(metadata)
conn_info["credentials_method"] = session.get_credentials().method
conn_info["region_name"] = session.region_name
return True, ", ".join(f"{k}={v!r}" for k, v in conn_info.items())
except Exception as e:
return False, str(f"{type(e).__name__!r} error occurred while testing connection: {e}")
@cached_property
def waiter_path(self) -> PathLike[str] | None:
filename = self.client_type if self.client_type else self.resource_type
path = Path(__file__).parents[1].joinpath(f"waiters/{filename}.json").resolve()
return path if path.exists() else None
def get_waiter(
self,
waiter_name: str,
parameters: dict[str, str] | None = None,
deferrable: bool = False,
client=None,
) -> Waiter:
"""Get a waiter by name.
First checks if there is a custom waiter with the provided waiter_name and
uses that if it exists, otherwise it will check the service client for a
waiter that matches the name and pass that through.
        If `deferrable` is True, the waiter will be an AIOWaiter generated from the
        client that is passed as a parameter; in that case `client` must be provided.
:param waiter_name: The name of the waiter. The name should exactly match the
name of the key in the waiter model file (typically this is CamelCase).
:param parameters: will scan the waiter config for the keys of that dict,
and replace them with the corresponding value. If a custom waiter has
such keys to be expanded, they need to be provided here.
:param deferrable: If True, the waiter is going to be an async custom waiter.
An async client must be provided in that case.
:param client: The client to use for the waiter's operations
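        A usage sketch (the waiter name and template parameter are placeholders; the keyword
        arguments accepted by ``wait()`` depend on the waiter's underlying API operation):
        .. code-block:: python
            waiter = hook.get_waiter("my_custom_waiter", parameters={"status": "ACTIVE"})
            waiter.wait()  # pass the operation kwargs required by this particular waiter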
"""
from airflow.providers.amazon.aws.waiters.base_waiter import BaseBotoWaiter
if deferrable and not client:
raise ValueError("client must be provided for a deferrable waiter.")
client = client or self.conn
if self.waiter_path and (waiter_name in self._list_custom_waiters()):
# Currently, the custom waiter doesn't work with resource_type, only client_type is supported.
if self.resource_type:
credentials = self.get_credentials()
client = boto3.client(
self.resource_type,
region_name=self.region_name,
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
aws_session_token=credentials.token,
)
# Technically if waiter_name is in custom_waiters then self.waiter_path must
# exist but MyPy doesn't like the fact that self.waiter_path could be None.
with open(self.waiter_path) as config_file:
config = json.loads(config_file.read())
config = self._apply_parameters_value(config, waiter_name, parameters)
return BaseBotoWaiter(client=client, model_config=config, deferrable=deferrable).waiter(
waiter_name
)
# If there is no custom waiter found for the provided name,
# then try checking the service's official waiters.
return client.get_waiter(waiter_name)
@staticmethod
def _apply_parameters_value(config: dict, waiter_name: str, parameters: dict[str, str] | None) -> dict:
"""Replaces potential jinja templates in acceptors definition."""
# only process the waiter we're going to use to not raise errors for missing params for other waiters.
acceptors = config["waiters"][waiter_name]["acceptors"]
for a in acceptors:
arg = a["argument"]
template = jinja2.Template(arg, autoescape=False, undefined=jinja2.StrictUndefined)
try:
a["argument"] = template.render(parameters or {})
except jinja2.UndefinedError as e:
raise AirflowException(
f"Parameter was not supplied for templated waiter's acceptor '{arg}'", e
)
return config
def list_waiters(self) -> list[str]:
"""Returns a list containing the names of all waiters for the service, official and custom."""
return [*self._list_official_waiters(), *self._list_custom_waiters()]
def _list_official_waiters(self) -> list[str]:
return self.conn.waiter_names
def _list_custom_waiters(self) -> list[str]:
if not self.waiter_path:
return []
with open(self.waiter_path) as config_file:
model_config = json.load(config_file)
return WaiterModel(model_config).waiter_names
class AwsBaseHook(AwsGenericHook[Union[boto3.client, boto3.resource]]):
"""Base class for interact with AWS.
This class provide a thin wrapper around the boto3 Python library.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param client_type: Reference to :external:py:meth:`boto3.client service_name \
<boto3.session.Session.client>`, e.g. 'emr', 'batch', 's3', etc.
Mutually exclusive with ``resource_type``.
:param resource_type: Reference to :external:py:meth:`boto3.resource service_name \
<boto3.session.Session.resource>`, e.g. 's3', 'ec2', 'dynamodb', etc.
Mutually exclusive with ``client_type``.
:param config: Configuration for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
def resolve_session_factory() -> type[BaseSessionFactory]:
"""Resolves custom SessionFactory class."""
clazz = conf.getimport("aws", "session_factory", fallback=None)
if not clazz:
return BaseSessionFactory
if not issubclass(clazz, BaseSessionFactory):
raise TypeError(
f"Your custom AWS SessionFactory class `{clazz.__name__}` is not a subclass "
f"of `{BaseSessionFactory.__name__}`."
)
return clazz
SessionFactory = resolve_session_factory()
def _parse_s3_config(config_file_name: str, config_format: str | None = "boto", profile: str | None = None):
"""For compatibility with airflow.contrib.hooks.aws_hook."""
from airflow.providers.amazon.aws.utils.connection_wrapper import _parse_s3_config
return _parse_s3_config(
config_file_name=config_file_name,
config_format=config_format,
profile=profile,
)
try:
import aiobotocore.credentials
from aiobotocore.session import AioSession, get_session
except ImportError:
pass
class BaseAsyncSessionFactory(BaseSessionFactory):
"""
Base AWS Session Factory class to handle aiobotocore session creation.
    It currently handles environment credentials, AWS secret keys, and the STS client method
    ``assume_role`` provided in the Airflow connection.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"airflow.providers.amazon.aws.hook.base_aws.BaseAsyncSessionFactory has been deprecated and "
"will be removed in future",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
async def get_role_credentials(self) -> dict:
"""Get the role_arn, method credentials from connection and get the role credentials."""
async with self._basic_session.create_client("sts", region_name=self.region_name) as client:
response = await client.assume_role(
RoleArn=self.role_arn,
RoleSessionName=self._strip_invalid_session_name_characters(f"Airflow_{self.conn.conn_id}"),
**self.conn.assume_role_kwargs,
)
return response["Credentials"]
async def _get_refresh_credentials(self) -> dict[str, Any]:
self.log.debug("Refreshing credentials")
assume_role_method = self.conn.assume_role_method
if assume_role_method != "assume_role":
raise NotImplementedError(f"assume_role_method={assume_role_method} not expected")
credentials = await self.get_role_credentials()
expiry_time = credentials["Expiration"].isoformat()
self.log.debug("New credentials expiry_time: %s", expiry_time)
credentials = {
"access_key": credentials.get("AccessKeyId"),
"secret_key": credentials.get("SecretAccessKey"),
"token": credentials.get("SessionToken"),
"expiry_time": expiry_time,
}
return credentials
def _get_session_with_assume_role(self) -> AioSession:
assume_role_method = self.conn.assume_role_method
if assume_role_method != "assume_role":
raise NotImplementedError(f"assume_role_method={assume_role_method} not expected")
credentials = aiobotocore.credentials.AioRefreshableCredentials.create_from_metadata(
metadata=self._get_refresh_credentials(),
refresh_using=self._get_refresh_credentials,
method="sts-assume-role",
)
session = aiobotocore.session.get_session()
session._credentials = credentials
return session
@cached_property
def _basic_session(self) -> AioSession:
"""Cached property with basic aiobotocore.session.AioSession."""
session_kwargs = self.conn.session_kwargs
aws_access_key_id = session_kwargs.get("aws_access_key_id")
aws_secret_access_key = session_kwargs.get("aws_secret_access_key")
aws_session_token = session_kwargs.get("aws_session_token")
region_name = session_kwargs.get("region_name")
profile_name = session_kwargs.get("profile_name")
aio_session = get_session()
if profile_name is not None:
aio_session.set_config_variable("profile", profile_name)
if aws_access_key_id or aws_secret_access_key or aws_session_token:
aio_session.set_credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token,
)
if region_name is not None:
aio_session.set_config_variable("region", region_name)
return aio_session
def create_session(self, deferrable: bool = False) -> AioSession:
"""Create aiobotocore Session from connection and config."""
if not self._conn:
self.log.info("No connection ID provided. Fallback on boto3 credential strategy")
return get_session()
elif not self.role_arn:
return self._basic_session
return self._get_session_with_assume_role()
class AwsBaseAsyncHook(AwsBaseHook):
"""Interacts with AWS using aiobotocore asynchronously.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default botocore behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default botocore configuration would be used (and must be
maintained on each worker node).
:param verify: Whether to verify SSL certificates.
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param client_type: boto3.client client_type, e.g. 's3' or 'emr'.
    :param resource_type: boto3.resource resource_type, e.g. 'dynamodb'.
:param config: Configuration for botocore client.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"airflow.providers.amazon.aws.hook.base_aws.AwsBaseAsyncHook has been deprecated and "
"will be removed in future",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def get_async_session(self) -> AioSession:
"""Get the underlying aiobotocore.session.AioSession(...)."""
return BaseAsyncSessionFactory(
conn=self.conn_config, region_name=self.region_name, config=self.config
).create_session()
async def get_client_async(self):
"""Get the underlying aiobotocore client using aiobotocore session."""
return self.get_async_session().create_client(
self.client_type,
region_name=self.region_name,
verify=self.verify,
endpoint_url=self.conn_config.endpoint_url,
config=self.config,
)
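# Minimal usage sketch for illustration only: how the (deprecated) AwsBaseAsyncHook hands out an
# aiobotocore client. The "aws_default" connection id is an assumption; any configured AWS
# connection would work, and credentials must be resolvable at runtime.
if __name__ == "__main__":
    import asyncio
    async def _list_buckets_demo() -> None:
        # get_client_async() returns an aiobotocore client creator, used as an async context manager.
        hook = AwsBaseAsyncHook(aws_conn_id="aws_default", client_type="s3")
        async with await hook.get_client_async() as s3_client:
            response = await s3_client.list_buckets()
            print([bucket["Name"] for bucket in response.get("Buckets", [])])
    asyncio.run(_list_buckets_demo())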
| 46,780 | 41.683394 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/datasync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Interact with AWS DataSync, using the AWS ``boto3`` library."""
from __future__ import annotations
import time
from urllib.parse import urlsplit
from airflow.exceptions import AirflowBadRequest, AirflowException, AirflowTaskTimeout
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class DataSyncHook(AwsBaseHook):
"""
Interact with AWS DataSync.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("datasync") <DataSync.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
:param wait_interval_seconds: Time to wait between two
consecutive calls to check TaskExecution status. Defaults to 30 seconds.
:raises ValueError: If wait_interval_seconds is not between 0 and 15*60 seconds.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
TASK_EXECUTION_INTERMEDIATE_STATES = (
"INITIALIZING",
"QUEUED",
"LAUNCHING",
"PREPARING",
"TRANSFERRING",
"VERIFYING",
)
TASK_EXECUTION_FAILURE_STATES = ("ERROR",)
TASK_EXECUTION_SUCCESS_STATES = ("SUCCESS",)
def __init__(self, wait_interval_seconds: int = 30, *args, **kwargs) -> None:
super().__init__(client_type="datasync", *args, **kwargs) # type: ignore[misc]
self.locations: list = []
self.tasks: list = []
# wait_interval_seconds = 0 is used during unit tests
if wait_interval_seconds < 0 or wait_interval_seconds > 15 * 60:
raise ValueError(f"Invalid wait_interval_seconds {wait_interval_seconds}")
self.wait_interval_seconds = wait_interval_seconds
def create_location(self, location_uri: str, **create_location_kwargs) -> str:
"""
Creates a new location.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.create_location_s3`
- :external+boto3:py:meth:`DataSync.Client.create_location_smb`
- :external+boto3:py:meth:`DataSync.Client.create_location_nfs`
- :external+boto3:py:meth:`DataSync.Client.create_location_efs`
:param location_uri: Location URI used to determine the location type (S3, SMB, NFS, EFS).
:param create_location_kwargs: Passed to ``DataSync.Client.create_location_*`` methods.
:return: LocationArn of the created Location.
:raises AirflowException: If location type (prefix from ``location_uri``) is invalid.
"""
schema = urlsplit(location_uri).scheme
if schema == "smb":
location = self.get_conn().create_location_smb(**create_location_kwargs)
elif schema == "s3":
location = self.get_conn().create_location_s3(**create_location_kwargs)
elif schema == "nfs":
location = self.get_conn().create_location_nfs(**create_location_kwargs)
elif schema == "efs":
location = self.get_conn().create_location_efs(**create_location_kwargs)
else:
raise AirflowException(f"Invalid/Unsupported location type: {schema}")
self._refresh_locations()
return location["LocationArn"]
def get_location_arns(
self, location_uri: str, case_sensitive: bool = False, ignore_trailing_slash: bool = True
) -> list[str]:
"""
Return all LocationArns which match a LocationUri.
:param location_uri: Location URI to search for, eg ``s3://mybucket/mypath``
:param case_sensitive: Do a case sensitive search for location URI.
:param ignore_trailing_slash: Ignore / at the end of URI when matching.
:return: List of LocationArns.
:raises AirflowBadRequest: if ``location_uri`` is empty
"""
if not location_uri:
raise AirflowBadRequest("location_uri not specified")
if not self.locations:
self._refresh_locations()
result = []
if not case_sensitive:
location_uri = location_uri.lower()
if ignore_trailing_slash and location_uri.endswith("/"):
location_uri = location_uri[:-1]
for location_from_aws in self.locations:
location_uri_from_aws = location_from_aws["LocationUri"]
if not case_sensitive:
location_uri_from_aws = location_uri_from_aws.lower()
if ignore_trailing_slash and location_uri_from_aws.endswith("/"):
location_uri_from_aws = location_uri_from_aws[:-1]
if location_uri == location_uri_from_aws:
result.append(location_from_aws["LocationArn"])
return result
def _refresh_locations(self) -> None:
"""Refresh the local list of Locations."""
self.locations = []
next_token = None
while True:
if next_token:
locations = self.get_conn().list_locations(NextToken=next_token)
else:
locations = self.get_conn().list_locations()
self.locations.extend(locations["Locations"])
if "NextToken" not in locations:
break
next_token = locations["NextToken"]
def create_task(
self, source_location_arn: str, destination_location_arn: str, **create_task_kwargs
) -> str:
"""Create a Task between the specified source and destination LocationArns.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.create_task`
:param source_location_arn: Source LocationArn. Must exist already.
:param destination_location_arn: Destination LocationArn. Must exist already.
:param create_task_kwargs: Passed to ``boto.create_task()``. See AWS boto3 datasync documentation.
:return: TaskArn of the created Task
"""
task = self.get_conn().create_task(
SourceLocationArn=source_location_arn,
DestinationLocationArn=destination_location_arn,
**create_task_kwargs,
)
self._refresh_tasks()
return task["TaskArn"]
def update_task(self, task_arn: str, **update_task_kwargs) -> None:
"""Update a Task.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.update_task`
:param task_arn: The TaskArn to update.
        :param update_task_kwargs: Passed to ``boto.update_task()``. See AWS boto3 datasync documentation.
"""
self.get_conn().update_task(TaskArn=task_arn, **update_task_kwargs)
def delete_task(self, task_arn: str) -> None:
"""Delete a Task.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.delete_task`
:param task_arn: The TaskArn to delete.
"""
self.get_conn().delete_task(TaskArn=task_arn)
def _refresh_tasks(self) -> None:
"""Refreshes the local list of Tasks."""
self.tasks = []
next_token = None
while True:
if next_token:
tasks = self.get_conn().list_tasks(NextToken=next_token)
else:
tasks = self.get_conn().list_tasks()
self.tasks.extend(tasks["Tasks"])
if "NextToken" not in tasks:
break
next_token = tasks["NextToken"]
def get_task_arns_for_location_arns(
self,
source_location_arns: list,
destination_location_arns: list,
) -> list:
"""
Return list of TaskArns which use both a specified source and destination LocationArns.
:param source_location_arns: List of source LocationArns.
:param destination_location_arns: List of destination LocationArns.
:raises AirflowBadRequest: if ``source_location_arns`` or ``destination_location_arns`` are empty.
"""
if not source_location_arns:
raise AirflowBadRequest("source_location_arns not specified")
if not destination_location_arns:
raise AirflowBadRequest("destination_location_arns not specified")
if not self.tasks:
self._refresh_tasks()
result = []
for task in self.tasks:
task_arn = task["TaskArn"]
task_description = self.get_task_description(task_arn)
if task_description["SourceLocationArn"] in source_location_arns:
if task_description["DestinationLocationArn"] in destination_location_arns:
result.append(task_arn)
return result
def start_task_execution(self, task_arn: str, **kwargs) -> str:
"""
Start a TaskExecution for the specified task_arn.
Each task can have at most one TaskExecution.
        Additional keyword arguments are passed to the ``start_task_execution`` boto3 method.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.start_task_execution`
:param task_arn: TaskArn
:return: TaskExecutionArn
:raises ClientError: If a TaskExecution is already busy running for this ``task_arn``.
:raises AirflowBadRequest: If ``task_arn`` is empty.
"""
if not task_arn:
raise AirflowBadRequest("task_arn not specified")
task_execution = self.get_conn().start_task_execution(TaskArn=task_arn, **kwargs)
return task_execution["TaskExecutionArn"]
def cancel_task_execution(self, task_execution_arn: str) -> None:
"""
Cancel a TaskExecution for the specified ``task_execution_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.cancel_task_execution`
:param task_execution_arn: TaskExecutionArn.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
"""
if not task_execution_arn:
raise AirflowBadRequest("task_execution_arn not specified")
self.get_conn().cancel_task_execution(TaskExecutionArn=task_execution_arn)
def get_task_description(self, task_arn: str) -> dict:
"""
Get description for the specified ``task_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.describe_task`
:param task_arn: TaskArn
:return: AWS metadata about a task.
:raises AirflowBadRequest: If ``task_arn`` is empty.
"""
if not task_arn:
raise AirflowBadRequest("task_arn not specified")
return self.get_conn().describe_task(TaskArn=task_arn)
def describe_task_execution(self, task_execution_arn: str) -> dict:
"""
Get description for the specified ``task_execution_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.describe_task_execution`
:param task_execution_arn: TaskExecutionArn
:return: AWS metadata about a task execution.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
"""
        if not task_execution_arn:
            raise AirflowBadRequest("task_execution_arn not specified")
        return self.get_conn().describe_task_execution(TaskExecutionArn=task_execution_arn)
def get_current_task_execution_arn(self, task_arn: str) -> str | None:
"""
Get current TaskExecutionArn (if one exists) for the specified ``task_arn``.
:param task_arn: TaskArn
:return: CurrentTaskExecutionArn for this ``task_arn`` or None.
:raises AirflowBadRequest: if ``task_arn`` is empty.
"""
if not task_arn:
raise AirflowBadRequest("task_arn not specified")
task_description = self.get_task_description(task_arn)
if "CurrentTaskExecutionArn" in task_description:
return task_description["CurrentTaskExecutionArn"]
return None
def wait_for_task_execution(self, task_execution_arn: str, max_iterations: int = 60) -> bool:
"""
Wait for Task Execution status to be complete (SUCCESS/ERROR).
The ``task_execution_arn`` must exist, or a boto3 ClientError will be raised.
:param task_execution_arn: TaskExecutionArn
:param max_iterations: Maximum number of iterations before timing out.
:return: Result of task execution.
:raises AirflowTaskTimeout: If maximum iterations is exceeded.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
"""
if not task_execution_arn:
raise AirflowBadRequest("task_execution_arn not specified")
status = None
iterations = max_iterations
while status is None or status in self.TASK_EXECUTION_INTERMEDIATE_STATES:
task_execution = self.get_conn().describe_task_execution(TaskExecutionArn=task_execution_arn)
status = task_execution["Status"]
self.log.info("status=%s", status)
iterations -= 1
if status in self.TASK_EXECUTION_FAILURE_STATES:
break
if status in self.TASK_EXECUTION_SUCCESS_STATES:
break
if iterations <= 0:
break
time.sleep(self.wait_interval_seconds)
if status in self.TASK_EXECUTION_SUCCESS_STATES:
return True
if status in self.TASK_EXECUTION_FAILURE_STATES:
return False
if iterations <= 0:
raise AirflowTaskTimeout("Max iterations exceeded!")
raise AirflowException(f"Unknown status: {status}") # Should never happen
| 14,079 | 40.656805 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/kinesis.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS Firehose hook."""
from __future__ import annotations
from typing import Iterable
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class FirehoseHook(AwsBaseHook):
"""
Interact with Amazon Kinesis Firehose.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("firehose") <Firehose.Client>`.
:param delivery_stream: Name of the delivery stream
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, delivery_stream: str, *args, **kwargs) -> None:
self.delivery_stream = delivery_stream
kwargs["client_type"] = "firehose"
super().__init__(*args, **kwargs)
def put_records(self, records: Iterable):
"""Write batch records to Kinesis Firehose.
.. seealso::
- :external+boto3:py:meth:`Firehose.Client.put_record_batch`
:param records: list of records
"""
return self.get_conn().put_record_batch(DeliveryStreamName=self.delivery_stream, Records=records)
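# Minimal usage sketch for illustration only: push a small batch of JSON records to a delivery
# stream. The stream name and the "aws_default" connection id are placeholders.
if __name__ == "__main__":
    import json
    hook = FirehoseHook(delivery_stream="example-delivery-stream", aws_conn_id="aws_default")
    records = [{"Data": json.dumps({"event_id": i, "status": "ok"})} for i in range(3)]
    response = hook.put_records(records)
    print("Failed records:", response["FailedPutCount"])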
| 1,996 | 35.309091 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/batch_waiters.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
AWS Batch service waiters.
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/blob/develop/botocore/waiter.py
"""
from __future__ import annotations
import json
import sys
from copy import deepcopy
from pathlib import Path
from typing import Callable
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher
class BatchWaitersHook(BatchClientHook):
"""
A utility to manage waiters for AWS Batch services.
.. code-block:: python
import random
from airflow.providers.amazon.aws.operators.batch_waiters import BatchWaiters
# to inspect default waiters
waiters = BatchWaiters()
config = waiters.default_config # type: Dict
waiter_names = waiters.list_waiters() # -> ["JobComplete", "JobExists", "JobRunning"]
# The default_config is a useful stepping stone to creating custom waiters, e.g.
custom_config = waiters.default_config # this is a deepcopy
# modify custom_config['waiters'] as necessary and get a new instance:
waiters = BatchWaiters(waiter_config=custom_config)
waiters.waiter_config # check the custom configuration (this is a deepcopy)
waiters.list_waiters() # names of custom waiters
# During the init for BatchWaiters, the waiter_config is used to build a waiter_model;
# and note that this only occurs during the class init, to avoid any accidental mutations
# of waiter_config leaking into the waiter_model.
waiters.waiter_model # -> botocore.waiter.WaiterModel object
# The waiter_model is combined with the waiters.client to get a specific waiter
# and the details of the config on that waiter can be further modified without any
# accidental impact on the generation of new waiters from the defined waiter_model, e.g.
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # -> botocore.waiter.Batch.Waiter.JobExists object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
# To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
waiter = waiters.get_waiter("JobExists") # -> botocore.waiter.Batch.Waiter.JobExists object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
.. seealso::
- https://www.2ndwatch.com/blog/use-waiters-boto3-write/
- https://github.com/boto/botocore/blob/develop/botocore/waiter.py
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#waiters
- https://github.com/boto/botocore/tree/develop/botocore/data/ec2/2016-11-15
- https://github.com/boto/botocore/issues/1915
:param waiter_config: a custom waiter configuration for AWS Batch services
:param aws_conn_id: connection id of AWS credentials / region name. If None,
        the default boto3 credential strategy will be used
(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html).
:param region_name: region name to use in AWS client.
Override the AWS region in connection (if provided)
"""
def __init__(self, *args, waiter_config: dict | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._default_config: dict | None = None
self._waiter_config = waiter_config or self.default_config
self._waiter_model = botocore.waiter.WaiterModel(self._waiter_config)
@property
def default_config(self) -> dict:
"""
An immutable default waiter configuration.
:return: a waiter configuration for AWS Batch services
"""
if self._default_config is None:
config_path = Path(__file__).with_name("batch_waiters.json").resolve()
with open(config_path) as config_file:
self._default_config = json.load(config_file)
return deepcopy(self._default_config) # avoid accidental mutation
@property
def waiter_config(self) -> dict:
"""
An immutable waiter configuration for this instance; a ``deepcopy`` is returned by this property.
During the init for BatchWaiters, the waiter_config is used to build a
waiter_model and this only occurs during the class init, to avoid any
accidental mutations of waiter_config leaking into the waiter_model.
:return: a waiter configuration for AWS Batch services
"""
return deepcopy(self._waiter_config) # avoid accidental mutation
@property
def waiter_model(self) -> botocore.waiter.WaiterModel:
"""
A configured waiter model used to generate waiters on AWS Batch services.
:return: a waiter model for AWS Batch services
"""
return self._waiter_model
def get_waiter(
self, waiter_name: str, _: dict[str, str] | None = None, deferrable: bool = False, client=None
) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter, using the configured ``.waiter_model``.
The ``.waiter_model`` is combined with the ``.client`` to get a specific waiter and
the properties of that waiter can be modified without any accidental impact on the
generation of new waiters from the ``.waiter_model``, e.g.
.. code-block:: python
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
.. code-block:: python
import random
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing); see ``.list_waiters``.
:param _: unused, just here to match the method signature in base_aws
:return: a waiter object for the named AWS Batch service
"""
return botocore.waiter.create_waiter_with_client(waiter_name, self.waiter_model, self.client)
def list_waiters(self) -> list[str]:
"""
List the waiters in a waiter configuration for AWS Batch services.
:return: waiter names for AWS Batch services
"""
return self.waiter_model.waiter_names
def wait_for_job(
self,
job_id: str,
delay: int | float | None = None,
get_batch_log_fetcher: Callable[[str], AwsTaskLogFetcher | None] | None = None,
) -> None:
"""
Wait for Batch job to complete.
This assumes that the ``.waiter_model`` is configured using some
variation of the ``.default_config`` so that it can generate waiters
with the following names: "JobExists", "JobRunning" and "JobComplete".
:param job_id: a Batch job ID
:param delay: A delay before polling for job status
:param get_batch_log_fetcher: A method that returns batch_log_fetcher of
type AwsTaskLogFetcher or None when the CloudWatch log stream hasn't been created yet.
:raises: AirflowException
.. note::
This method adds a small random jitter to the ``delay`` (+/- 2 sec, >= 1 sec).
Using a random interval helps to avoid AWS API throttle limits when many
concurrent tasks request job-descriptions.
It also modifies the ``max_attempts`` to use the ``sys.maxsize``,
which allows Airflow to manage the timeout on waiting.
"""
self.delay(delay)
try:
waiter = self.get_waiter("JobExists")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
waiter = self.get_waiter("JobRunning")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
batch_log_fetcher = None
try:
if get_batch_log_fetcher:
batch_log_fetcher = get_batch_log_fetcher(job_id)
if batch_log_fetcher:
batch_log_fetcher.start()
waiter = self.get_waiter("JobComplete")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
finally:
if batch_log_fetcher:
batch_log_fetcher.stop()
batch_log_fetcher.join()
except (botocore.exceptions.ClientError, botocore.exceptions.WaiterError) as err:
raise AirflowException(err)
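# Minimal usage sketch for illustration only, mirroring the class docstring: tweak a generated
# waiter, then block until the job finishes. The job id is a placeholder and "aws_default" is an
# assumed connection id.
if __name__ == "__main__":
    waiters = BatchWaitersHook(aws_conn_id="aws_default")
    print("Available waiters:", waiters.list_waiters())
    job_id = "00000000-0000-0000-0000-000000000000"  # placeholder AWS Batch job id
    waiter = waiters.get_waiter("JobComplete")
    waiter.config.delay = 15
    waiter.config.max_attempts = 40
    waiter.wait(jobs=[job_id])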
| 10,541 | 41.337349 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/redshift_data.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from pprint import pformat
from time import sleep
from typing import TYPE_CHECKING, Any, Iterable
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.utils import trim_none_values
if TYPE_CHECKING:
from mypy_boto3_redshift_data import RedshiftDataAPIServiceClient # noqa
class RedshiftDataHook(AwsGenericHook["RedshiftDataAPIServiceClient"]):
"""
Interact with Amazon Redshift Data API.
Provide thin wrapper around
:external+boto3:py:class:`boto3.client("redshift-data") <RedshiftDataAPIService.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon Redshift Data API \
<https://docs.aws.amazon.com/redshift-data/latest/APIReference/Welcome.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "redshift-data"
super().__init__(*args, **kwargs)
def execute_query(
self,
database: str,
sql: str | list[str],
cluster_identifier: str | None = None,
db_user: str | None = None,
parameters: Iterable | None = None,
secret_arn: str | None = None,
statement_name: str | None = None,
with_event: bool = False,
wait_for_completion: bool = True,
poll_interval: int = 10,
) -> str:
"""
Execute a statement against Amazon Redshift.
:param database: the name of the database
        :param sql: the SQL statement or list of SQL statements to run
:param cluster_identifier: unique identifier of a cluster
:param db_user: the database username
:param parameters: the parameters for the SQL statement
:param secret_arn: the name or ARN of the secret that enables db access
:param statement_name: the name of the SQL statement
:param with_event: indicates whether to send an event to EventBridge
        :param wait_for_completion: whether to wait for the statement to finish before returning
:param poll_interval: how often in seconds to check the query status
        :return: statement_id, the UUID of the statement
"""
kwargs: dict[str, Any] = {
"ClusterIdentifier": cluster_identifier,
"Database": database,
"DbUser": db_user,
"Parameters": parameters,
"WithEvent": with_event,
"SecretArn": secret_arn,
"StatementName": statement_name,
}
if isinstance(sql, list):
kwargs["Sqls"] = sql
resp = self.conn.batch_execute_statement(**trim_none_values(kwargs))
else:
kwargs["Sql"] = sql
resp = self.conn.execute_statement(**trim_none_values(kwargs))
statement_id = resp["Id"]
if wait_for_completion:
self.wait_for_results(statement_id, poll_interval=poll_interval)
return statement_id
def wait_for_results(self, statement_id, poll_interval):
while True:
self.log.info("Polling statement %s", statement_id)
resp = self.conn.describe_statement(
Id=statement_id,
)
status = resp["Status"]
if status == "FINISHED":
num_rows = resp.get("ResultRows")
if num_rows is not None:
self.log.info("Processed %s rows", num_rows)
return status
elif status == "FAILED" or status == "ABORTED":
raise ValueError(
f"Statement {statement_id!r} terminated with status {status}. "
f"Response details: {pformat(resp)}"
)
else:
self.log.info("Query %s", status)
sleep(poll_interval)
def get_table_primary_key(
self,
table: str,
database: str,
schema: str | None = "public",
cluster_identifier: str | None = None,
db_user: str | None = None,
secret_arn: str | None = None,
statement_name: str | None = None,
with_event: bool = False,
wait_for_completion: bool = True,
poll_interval: int = 10,
) -> list[str] | None:
"""
Helper method that returns the table primary key.
Copied from ``RedshiftSQLHook.get_table_primary_key()``
:param table: Name of the target table
:param database: the name of the database
:param schema: Name of the target schema, public by default
:param cluster_identifier: unique identifier of a cluster
:param db_user: the database username
:param secret_arn: the name or ARN of the secret that enables db access
:param statement_name: the name of the SQL statement
:param with_event: indicates whether to send an event to EventBridge
        :param wait_for_completion: whether to wait for the statement to finish before returning
:param poll_interval: how often in seconds to check the query status
:return: Primary key columns list
"""
sql = f"""
select kcu.column_name
from information_schema.table_constraints tco
join information_schema.key_column_usage kcu
on kcu.constraint_name = tco.constraint_name
and kcu.constraint_schema = tco.constraint_schema
and kcu.constraint_name = tco.constraint_name
where tco.constraint_type = 'PRIMARY KEY'
and kcu.table_schema = {schema}
and kcu.table_name = {table}
"""
stmt_id = self.execute_query(
sql=sql,
database=database,
cluster_identifier=cluster_identifier,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
with_event=with_event,
wait_for_completion=wait_for_completion,
poll_interval=poll_interval,
)
pk_columns = []
token = ""
while True:
kwargs = dict(Id=stmt_id)
if token:
kwargs["NextToken"] = token
response = self.conn.get_statement_result(**kwargs)
# we only select a single column (that is a string),
# so safe to assume that there is only a single col in the record
pk_columns += [y["stringValue"] for x in response["Records"] for y in x]
if "NextToken" not in response.keys():
break
else:
token = response["NextToken"]
return pk_columns or None
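# Minimal usage sketch for illustration only: run a trivial statement through the Data API, then
# look up a table's primary key. The cluster identifier, database, user and table names are
# placeholders, and a configured "aws_default" connection is assumed.
if __name__ == "__main__":
    hook = RedshiftDataHook(aws_conn_id="aws_default")
    statement_id = hook.execute_query(
        database="dev",
        sql="SELECT 1;",
        cluster_identifier="example-cluster",
        db_user="awsuser",
        wait_for_completion=True,
    )
    print("Finished statement:", statement_id)
    pk_columns = hook.get_table_primary_key(
        table="orders",
        database="dev",
        schema="public",
        cluster_identifier="example-cluster",
        db_user="awsuser",
    )
    print("Primary key columns:", pk_columns)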
| 7,722 | 38.809278 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/lambda_function.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS Lambda hook."""
from __future__ import annotations
from typing import Any
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.utils import trim_none_values
class LambdaHook(AwsBaseHook):
"""
Interact with AWS Lambda.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("lambda") <Lambda.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "lambda"
super().__init__(*args, **kwargs)
def invoke_lambda(
self,
*,
function_name: str,
invocation_type: str | None = None,
log_type: str | None = None,
client_context: str | None = None,
payload: bytes | str | None = None,
qualifier: str | None = None,
):
"""
Invoke Lambda Function.
.. seealso::
- :external+boto3:py:meth:`Lambda.Client.invoke`
:param function_name: AWS Lambda Function Name
        :param invocation_type: AWS Lambda invocation type (RequestResponse, Event, etc.)
:param log_type: Tail Invocation Request
:param client_context: Up to 3,583 bytes of base64-encoded data about the invoking client
to pass to the function in the context object.
:param payload: The JSON that you want to provide to your Lambda function as input.
:param qualifier: AWS Lambda Function Version or Alias Name
"""
if isinstance(payload, str):
payload = payload.encode()
invoke_args = {
"FunctionName": function_name,
"InvocationType": invocation_type,
"LogType": log_type,
"ClientContext": client_context,
"Payload": payload,
"Qualifier": qualifier,
}
return self.conn.invoke(**trim_none_values(invoke_args))
def create_lambda(
self,
*,
function_name: str,
runtime: str | None = None,
role: str,
handler: str | None = None,
code: dict,
description: str | None = None,
timeout: int | None = None,
memory_size: int | None = None,
publish: bool | None = None,
vpc_config: Any | None = None,
package_type: str | None = None,
dead_letter_config: Any | None = None,
environment: Any | None = None,
kms_key_arn: str | None = None,
tracing_config: Any | None = None,
tags: Any | None = None,
layers: list | None = None,
file_system_configs: list[Any] | None = None,
image_config: Any | None = None,
code_signing_config_arn: str | None = None,
architectures: list[str] | None = None,
) -> dict:
"""
Creates a Lambda function.
.. seealso::
- :external+boto3:py:meth:`Lambda.Client.create_function`
- `Configuring a Lambda function to access resources in a VPC \
<https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html>`__
:param function_name: AWS Lambda Function Name
:param runtime: The identifier of the function's runtime.
Runtime is required if the deployment package is a .zip file archive.
:param role: The Amazon Resource Name (ARN) of the function's execution role.
:param handler: The name of the method within your code that Lambda calls to run your function.
Handler is required if the deployment package is a .zip file archive.
:param code: The code for the function.
:param description: A description of the function.
:param timeout: The amount of time (in seconds) that Lambda
allows a function to run before stopping it.
:param memory_size: The amount of memory available to the function at runtime.
Increasing the function memory also increases its CPU allocation.
:param publish: Set to true to publish the first version of the function during creation.
:param vpc_config: For network connectivity to Amazon Web Services resources in a VPC,
specify a list of security groups and subnets in the VPC.
:param package_type: The type of deployment package.
Set to `Image` for container image and set to `Zip` for .zip file archive.
:param dead_letter_config: A dead-letter queue configuration that specifies the queue or topic
where Lambda sends asynchronous events when they fail processing.
:param environment: Environment variables that are accessible from function code during execution.
:param kms_key_arn: The ARN of the Key Management Service (KMS) key that's used to
encrypt your function's environment variables.
If it's not provided, Lambda uses a default service key.
:param tracing_config: Set `Mode` to `Active` to sample and trace
a subset of incoming requests with X-Ray.
:param tags: A list of tags to apply to the function.
:param layers: A list of function layers to add to the function's execution environment.
Specify each layer by its ARN, including the version.
:param file_system_configs: Connection settings for an Amazon EFS file system.
:param image_config: Container image configuration values that override
the values in the container image Dockerfile.
:param code_signing_config_arn: To enable code signing for this function,
specify the ARN of a code-signing configuration.
A code-signing configuration includes a set of signing profiles,
which define the trusted publishers for this function.
:param architectures: The instruction set architecture that the function supports.
"""
if package_type == "Zip":
if handler is None:
raise TypeError("Parameter 'handler' is required if 'package_type' is 'Zip'")
if runtime is None:
raise TypeError("Parameter 'runtime' is required if 'package_type' is 'Zip'")
"""Create a Lambda Function"""
create_function_args = {
"FunctionName": function_name,
"Runtime": runtime,
"Role": role,
"Handler": handler,
"Code": code,
"Description": description,
"Timeout": timeout,
"MemorySize": memory_size,
"Publish": publish,
"VpcConfig": vpc_config,
"PackageType": package_type,
"DeadLetterConfig": dead_letter_config,
"Environment": environment,
"KMSKeyArn": kms_key_arn,
"TracingConfig": tracing_config,
"Tags": tags,
"Layers": layers,
"FileSystemConfigs": file_system_configs,
"ImageConfig": image_config,
"CodeSigningConfigArn": code_signing_config_arn,
"Architectures": architectures,
}
return self.conn.create_function(**trim_none_values(create_function_args))
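# Minimal usage sketch for illustration only: invoke an existing function synchronously and decode
# the JSON response payload. The function name and "aws_default" connection id are placeholders.
if __name__ == "__main__":
    import json
    hook = LambdaHook(aws_conn_id="aws_default")
    response = hook.invoke_lambda(
        function_name="example-function",
        invocation_type="RequestResponse",
        payload=json.dumps({"name": "airflow"}),
    )
    print("Status code:", response["StatusCode"])
    print("Payload:", json.loads(response["Payload"].read()))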
| 8,095 | 43.483516 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/ses.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS SES Hook."""
from __future__ import annotations
from typing import Any, Iterable
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.email import build_mime_message
class SesHook(AwsBaseHook):
"""
Interact with Amazon Simple Email Service.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("ses") <SES.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "ses"
super().__init__(*args, **kwargs)
def send_email(
self,
mail_from: str,
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
reply_to: str | None = None,
return_path: str | None = None,
custom_headers: dict[str, Any] | None = None,
) -> dict:
"""
Send email using Amazon Simple Email Service.
.. seealso::
- :external+boto3:py:meth:`SES.Client.send_raw_email`
:param mail_from: Email address to set as email's from
:param to: List of email addresses to set as email's to
:param subject: Email's subject
:param html_content: Content of email in HTML format
:param files: List of paths of files to be attached
:param cc: List of email addresses to set as email's CC
:param bcc: List of email addresses to set as email's BCC
:param mime_subtype: Can be used to specify the sub-type of the message. Default = mixed
:param mime_charset: Email's charset. Default = UTF-8.
        :param reply_to: The email address to which replies will be sent. By default, replies
            are sent to the original sender's email address.
        :param return_path: The email address to which message bounces and complaints should be sent.
            "Return-Path" is sometimes called "envelope from", "envelope sender", or "MAIL FROM".
:param custom_headers: Additional headers to add to the MIME message.
No validations are run on these values and they should be able to be encoded.
:return: Response from Amazon SES service with unique message identifier.
"""
ses_client = self.get_conn()
custom_headers = custom_headers or {}
if reply_to:
custom_headers["Reply-To"] = reply_to
if return_path:
custom_headers["Return-Path"] = return_path
message, recipients = build_mime_message(
mail_from=mail_from,
to=to,
subject=subject,
html_content=html_content,
files=files,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
custom_headers=custom_headers,
)
return ses_client.send_raw_email(
Source=mail_from, Destinations=recipients, RawMessage={"Data": message.as_string()}
)
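# Minimal usage sketch for illustration only: send a simple HTML email. All addresses are
# placeholders, and the sender address is assumed to be verified in SES for the account behind the
# "aws_default" connection.
if __name__ == "__main__":
    hook = SesHook(aws_conn_id="aws_default")
    response = hook.send_email(
        mail_from="noreply@example.com",
        to="team@example.com",
        subject="Daily report",
        html_content="<h3>All DAGs green today.</h3>",
        reply_to="data-eng@example.com",
    )
    print("SES MessageId:", response["MessageId"])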
| 4,146 | 38.495238 | 98 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/ssm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.types import NOTSET, ArgNotSet
class SsmHook(AwsBaseHook):
"""
Interact with Amazon Systems Manager (SSM).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("ssm") <SSM.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "ssm"
super().__init__(*args, **kwargs)
def get_parameter_value(self, parameter: str, default: str | ArgNotSet = NOTSET) -> str:
"""
Return the provided Parameter or an optional default; if it is encrypted, then decrypt and mask.
.. seealso::
- :external+boto3:py:meth:`SSM.Client.get_parameter`
:param parameter: The SSM Parameter name to return the value for.
:param default: Optional default value to return if none is found.
"""
try:
param = self.conn.get_parameter(Name=parameter, WithDecryption=True)["Parameter"]
value = param["Value"]
if param["Type"] == "SecureString":
mask_secret(value)
return value
except self.conn.exceptions.ParameterNotFound:
if isinstance(default, ArgNotSet):
raise
return default
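# Minimal usage sketch for illustration only: read a (possibly SecureString) parameter with a
# fallback default. The parameter name and "aws_default" connection id are placeholders.
if __name__ == "__main__":
    hook = SsmHook(aws_conn_id="aws_default")
    api_key = hook.get_parameter_value("/example/app/api-key", default="not-set")
    print("Parameter resolved:", api_key != "not-set")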
| 2,393 | 37.612903 | 104 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/ecs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from botocore.waiter import Waiter
from airflow.providers.amazon.aws.exceptions import EcsOperatorError, EcsTaskFailToStart
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.utils import _StringCompareEnum
from airflow.typing_compat import Protocol, runtime_checkable
def should_retry(exception: Exception):
"""Check if exception is related to ECS resource quota (CPU, MEM)."""
if isinstance(exception, EcsOperatorError):
return any(
quota_reason in failure["reason"]
for quota_reason in ["RESOURCE:MEMORY", "RESOURCE:CPU"]
for failure in exception.failures
)
return False
def should_retry_eni(exception: Exception):
"""Check if exception is related to ENI (Elastic Network Interfaces)."""
if isinstance(exception, EcsTaskFailToStart):
return any(
eni_reason in exception.message
for eni_reason in ["network interface provisioning", "ResourceInitializationError"]
)
return False
class EcsClusterStates(_StringCompareEnum):
"""Contains the possible State values of an ECS Cluster."""
ACTIVE = "ACTIVE"
PROVISIONING = "PROVISIONING"
DEPROVISIONING = "DEPROVISIONING"
FAILED = "FAILED"
INACTIVE = "INACTIVE"
class EcsTaskDefinitionStates(_StringCompareEnum):
"""Contains the possible State values of an ECS Task Definition."""
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
class EcsTaskStates(_StringCompareEnum):
"""Contains the possible State values of an ECS Task."""
PROVISIONING = "PROVISIONING"
PENDING = "PENDING"
ACTIVATING = "ACTIVATING"
RUNNING = "RUNNING"
DEACTIVATING = "DEACTIVATING"
STOPPING = "STOPPING"
DEPROVISIONING = "DEPROVISIONING"
STOPPED = "STOPPED"
NONE = "NONE"
class EcsHook(AwsGenericHook):
"""
Interact with Amazon Elastic Container Service (ECS).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("ecs") <ECS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon Elastic Container Service \
<https://docs.aws.amazon.com/AmazonECS/latest/APIReference/Welcome.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "ecs"
super().__init__(*args, **kwargs)
def get_cluster_state(self, cluster_name: str) -> str:
"""
Get ECS Cluster state.
.. seealso::
- :external+boto3:py:meth:`ECS.Client.describe_clusters`
:param cluster_name: ECS Cluster name or full cluster Amazon Resource Name (ARN) entry.
"""
return self.conn.describe_clusters(clusters=[cluster_name])["clusters"][0]["status"]
def get_task_definition_state(self, task_definition: str) -> str:
"""
Get ECS Task Definition state.
.. seealso::
- :external+boto3:py:meth:`ECS.Client.describe_task_definition`
:param task_definition: The family for the latest ACTIVE revision,
family and revision ( family:revision ) for a specific revision in the family,
or full Amazon Resource Name (ARN) of the task definition to describe.
"""
return self.conn.describe_task_definition(taskDefinition=task_definition)["taskDefinition"]["status"]
def get_task_state(self, cluster, task) -> str:
"""
Get ECS Task state.
.. seealso::
- :external+boto3:py:meth:`ECS.Client.describe_tasks`
:param cluster: The short name or full Amazon Resource Name (ARN)
of the cluster that hosts the task or tasks to describe.
:param task: Task ID or full ARN entry.
"""
return self.conn.describe_tasks(cluster=cluster, tasks=[task])["tasks"][0]["lastStatus"]
@runtime_checkable
class EcsProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('ecs')``.
This is used for type hints on :py:meth:`.EcsOperator.client`.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
"""
def run_task(self, **kwargs) -> dict:
"""Run a task.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
"""
...
def get_waiter(self, x: str) -> Waiter:
"""Get a waiter.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter
"""
...
def describe_tasks(self, cluster: str, tasks) -> dict:
"""Describe tasks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks
"""
...
def stop_task(self, cluster, task, reason: str) -> dict:
"""Stop a task.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task
"""
...
def describe_task_definition(self, taskDefinition: str) -> dict:
"""Describe a task definition.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_task_definition
"""
...
def list_tasks(self, cluster: str, launchType: str, desiredStatus: str, family: str) -> dict:
"""List tasks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.list_tasks
"""
...
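# Minimal usage sketch for illustration only: check cluster and task state against the enums
# defined above. The cluster name and task ARN are placeholders; a configured "aws_default"
# connection is assumed.
if __name__ == "__main__":
    hook = EcsHook(aws_conn_id="aws_default")
    cluster_state = hook.get_cluster_state("example-cluster")
    print("Cluster active:", cluster_state == EcsClusterStates.ACTIVE.value)
    task_state = hook.get_task_state(
        cluster="example-cluster",
        task="arn:aws:ecs:us-east-1:111122223333:task/example-cluster/0123456789abcdef0",
    )
    print("Task stopped:", task_state == EcsTaskStates.STOPPED.value)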
| 6,610 | 33.432292 | 127 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/cloud_formation.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS CloudFormation Hook."""
from __future__ import annotations
from botocore.exceptions import ClientError
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class CloudFormationHook(AwsBaseHook):
"""
Interact with AWS CloudFormation.
Provide thin wrapper around
:external+boto3:py:class:`boto3.client("cloudformation") <CloudFormation.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="cloudformation", *args, **kwargs)
    def get_stack_status(self, stack_name: str) -> str | None:
"""
Get stack status from CloudFormation.
.. seealso::
- :external+boto3:py:meth:`CloudFormation.Client.describe_stacks`
"""
self.log.info("Poking for stack %s", stack_name)
try:
stacks = self.get_conn().describe_stacks(StackName=stack_name)["Stacks"]
return stacks[0]["StackStatus"]
except ClientError as e:
if "does not exist" in str(e):
return None
else:
raise e
def create_stack(self, stack_name: str, cloudformation_parameters: dict) -> None:
"""
Create stack in CloudFormation.
.. seealso::
- :external+boto3:py:meth:`CloudFormation.Client.create_stack`
:param stack_name: stack_name.
:param cloudformation_parameters: parameters to be passed to CloudFormation.
"""
if "StackName" not in cloudformation_parameters:
cloudformation_parameters["StackName"] = stack_name
self.get_conn().create_stack(**cloudformation_parameters)
def delete_stack(self, stack_name: str, cloudformation_parameters: dict | None = None) -> None:
"""
Delete stack in CloudFormation.
.. seealso::
- :external+boto3:py:meth:`CloudFormation.Client.delete_stack`
:param stack_name: stack_name.
:param cloudformation_parameters: parameters to be passed to CloudFormation (optional).
"""
cloudformation_parameters = cloudformation_parameters or {}
if "StackName" not in cloudformation_parameters:
cloudformation_parameters["StackName"] = stack_name
self.get_conn().delete_stack(**cloudformation_parameters)
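# Minimal usage sketch for illustration only: create a stack from a tiny inline template, poll its
# status, then tear it down. The stack name and template are placeholders, and "aws_default" is an
# assumed connection id.
if __name__ == "__main__":
    import json
    hook = CloudFormationHook(aws_conn_id="aws_default")
    template = {"Resources": {"ExampleTopic": {"Type": "AWS::SNS::Topic"}}}
    hook.create_stack(
        stack_name="example-stack",
        cloudformation_parameters={"TemplateBody": json.dumps(template)},
    )
    print("Stack status:", hook.get_stack_status("example-stack"))
    hook.delete_stack(stack_name="example-stack")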
| 3,378 | 36.131868 | 99 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/ec2.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
import time
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
def only_client_type(func):
@functools.wraps(func)
def checker(self, *args, **kwargs):
if self._api_type == "client_type":
return func(self, *args, **kwargs)
ec2_doc_link = "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html"
raise AirflowException(
f"""
This method is only callable when using client_type API for interacting with EC2.
Create the EC2Hook object as follows to use this method
ec2 = EC2Hook(api_type="client_type")
Read following for details on client_type and resource_type APIs:
1. {ec2_doc_link}#client
2. {ec2_doc_link}#service-resource
"""
)
return checker
class EC2Hook(AwsBaseHook):
"""
Interact with Amazon Elastic Compute Cloud (EC2).
Provide thick wrapper around :external+boto3:py:class:`boto3.client("ec2") <EC2.Client>`
or :external+boto3:py:class:`boto3.resource("ec2") <EC2.ServiceResource>`.
:param api_type: If set to ``client_type`` then hook use ``boto3.client("ec2")`` capabilities,
If set to ``resource_type`` then hook use ``boto3.resource("ec2")`` capabilities.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
API_TYPES = frozenset({"resource_type", "client_type"})
def __init__(self, api_type="resource_type", *args, **kwargs) -> None:
if api_type not in self.API_TYPES:
raise AirflowException("api_type can only be one of %s", self.API_TYPES)
kwargs[api_type] = "ec2"
self._api_type = api_type
super().__init__(*args, **kwargs)
def get_instance(self, instance_id: str, filters: list | None = None):
"""
Get EC2 instance by id and return it.
:param instance_id: id of the AWS EC2 instance
:param filters: List of filters to specify instances to get
:return: Instance object
"""
if self._api_type == "client_type":
            return self.get_instances(filters=filters, instance_ids=[instance_id])[0]
return self.conn.Instance(id=instance_id)
@only_client_type
def stop_instances(self, instance_ids: list) -> dict:
"""
Stop instances with given ids.
:param instance_ids: List of instance ids to stop
:return: Dict with key `StoppingInstances` and value as list of instances being stopped
"""
self.log.info("Stopping instances: %s", instance_ids)
return self.conn.stop_instances(InstanceIds=instance_ids)
@only_client_type
def start_instances(self, instance_ids: list) -> dict:
"""
Start instances with given ids.
:param instance_ids: List of instance ids to start
:return: Dict with key `StartingInstances` and value as list of instances being started
"""
self.log.info("Starting instances: %s", instance_ids)
return self.conn.start_instances(InstanceIds=instance_ids)
@only_client_type
def terminate_instances(self, instance_ids: list) -> dict:
"""
Terminate instances with given ids.
:param instance_ids: List of instance ids to terminate
:return: Dict with key `TerminatingInstances` and value as list of instances being terminated
"""
self.log.info("Terminating instances: %s", instance_ids)
return self.conn.terminate_instances(InstanceIds=instance_ids)
@only_client_type
def describe_instances(self, filters: list | None = None, instance_ids: list | None = None):
"""
Describe EC2 instances, optionally applying filters and selective instance ids.
:param filters: List of filters to specify instances to describe
:param instance_ids: List of instance IDs to describe
:return: Response from EC2 describe_instances API
"""
filters = filters or []
instance_ids = instance_ids or []
self.log.info("Filters provided: %s", filters)
self.log.info("Instance ids provided: %s", instance_ids)
return self.conn.describe_instances(Filters=filters, InstanceIds=instance_ids)
@only_client_type
def get_instances(self, filters: list | None = None, instance_ids: list | None = None) -> list:
"""
Get list of instance details, optionally applying filters and selective instance ids.
:param instance_ids: List of ids to get instances for
:param filters: List of filters to specify instances to get
:return: List of instances
"""
description = self.describe_instances(filters=filters, instance_ids=instance_ids)
return [
instance for reservation in description["Reservations"] for instance in reservation["Instances"]
]
@only_client_type
def get_instance_ids(self, filters: list | None = None) -> list:
"""
Get list of instance ids, optionally applying filters to fetch selective instances.
:param filters: List of filters to specify instances to get
:return: List of instance ids
"""
return [instance["InstanceId"] for instance in self.get_instances(filters=filters)]
    async def get_instance_state_async(self, instance_id: str) -> str:
        """
        Get EC2 instance state by id asynchronously and return it.
        :param instance_id: id of the AWS EC2 instance
        :return: current state of the instance
        """
        async with self.async_conn as client:
            response = await client.describe_instances(InstanceIds=[instance_id])
            return response["Reservations"][0]["Instances"][0]["State"]["Name"]
def get_instance_state(self, instance_id: str) -> str:
"""
Get EC2 instance state by id and return it.
:param instance_id: id of the AWS EC2 instance
:return: current state of the instance
"""
if self._api_type == "client_type":
return self.get_instances(instance_ids=[instance_id])[0]["State"]["Name"]
return self.get_instance(instance_id=instance_id).state["Name"]
def wait_for_state(self, instance_id: str, target_state: str, check_interval: float) -> None:
"""
Wait EC2 instance until its state is equal to the target_state.
:param instance_id: id of the AWS EC2 instance
:param target_state: target state of instance
:param check_interval: time in seconds that the job should wait in
between each instance state checks until operation is completed
:return: None
"""
instance_state = self.get_instance_state(instance_id=instance_id)
while instance_state != target_state:
time.sleep(check_interval)
instance_state = self.get_instance_state(instance_id=instance_id)
self.log.info(
"instance state: %s. Same as target: %s", instance_state, instance_state == target_state
)
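# Illustrative usage sketch (not part of the upstream module): starts a stopped instance
# with the client_type API and blocks until it is running. The connection id and instance
# id are placeholders and assume valid AWS credentials and permissions.
def _example_start_and_wait(instance_id: str = "i-0123456789abcdef0") -> None:
    hook = EC2Hook(api_type="client_type", aws_conn_id="aws_default")
    hook.start_instances(instance_ids=[instance_id])
    # Poll every 15 seconds until the instance reports the "running" state.
    hook.wait_for_state(instance_id=instance_id, target_state="running", check_interval=15)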
| 7,928 | 37.490291 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/sqs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS SQS hook."""
from __future__ import annotations
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class SqsHook(AwsBaseHook):
"""
Interact with Amazon Simple Queue Service.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("sqs") <SQS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "sqs"
super().__init__(*args, **kwargs)
def create_queue(self, queue_name: str, attributes: dict | None = None) -> dict:
"""
Create queue using connection object.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.create_queue`
:param queue_name: name of the queue.
:param attributes: additional attributes for the queue (default: None)
:return: dict with the information about the queue.
"""
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
def send_message(
self,
queue_url: str,
message_body: str,
delay_seconds: int = 0,
message_attributes: dict | None = None,
message_group_id: str | None = None,
) -> dict:
"""
Send message to the queue.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.send_message`
:param queue_url: queue url
:param message_body: the contents of the message
:param delay_seconds: seconds to delay the message
:param message_attributes: additional attributes for the message (default: None)
:param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None)
:return: dict with the information about the message sent
"""
params = {
"QueueUrl": queue_url,
"MessageBody": message_body,
"DelaySeconds": delay_seconds,
"MessageAttributes": message_attributes or {},
}
if message_group_id:
params["MessageGroupId"] = message_group_id
return self.get_conn().send_message(**params)
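# Illustrative usage sketch (not part of the upstream module): creates a queue and
# publishes one message to it. The queue name and connection id are placeholders.
def _example_publish() -> dict:
    hook = SqsHook(aws_conn_id="aws_default")
    queue = hook.create_queue(queue_name="example-airflow-queue")
    return hook.send_message(
        queue_url=queue["QueueUrl"],
        message_body='{"event": "example"}',
        delay_seconds=0,
    )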
| 3,121 | 35.729412 | 103 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/chime.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a web hook for Chime."""
from __future__ import annotations
import json
import re
from typing import Any
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
class ChimeWebhookHook(HttpHook):
"""Interact with Chime Web Hooks to create notifications.
.. warning:: This hook is only designed to work with web hooks and not chat bots.
:param chime_conn_id: Chime connection ID with Endpoint as "https://hooks.chime.aws" and
        the webhook token in the form of ``{webhook.id}?token={webhook.token}``
"""
conn_name_attr = "chime_conn_id"
default_conn_name = "chime_default"
conn_type = "chime"
hook_name = "Chime Web Hook"
def __init__(
self,
chime_conn_id: str,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.webhook_endpoint = self._get_webhook_endpoint(chime_conn_id)
def _get_webhook_endpoint(self, conn_id: str) -> str:
"""
Given a Chime conn_id return the default webhook endpoint.
:param conn_id: The provided connection ID.
:return: Endpoint(str) for chime webhook.
"""
conn = self.get_connection(conn_id)
token = conn.get_password()
if token is None:
raise AirflowException("Webhook token field is missing and is required.")
url = conn.schema + "://" + conn.host
endpoint = url + token
        # Check to make sure the token matches the format Chime expects
if not re.match(r"^[a-zA-Z0-9_-]+\?token=[a-zA-Z0-9_-]+$", token):
raise AirflowException(
"Expected Chime webhook token in the form of '{webhook.id}?token={webhook.token}'."
)
return endpoint
def _build_chime_payload(self, message: str) -> str:
"""
Builds payload for Chime and ensures messages do not exceed max length allowed.
:param message: The message you want to send to your Chime room.
(max 4096 characters)
"""
payload: dict[str, Any] = {}
# We need to make sure that the message does not exceed the max length for Chime
if len(message) > 4096:
raise AirflowException("Chime message must be 4096 characters or less.")
payload["Content"] = message
return json.dumps(payload)
def send_message(self, message: str) -> None:
"""Execute calling the Chime webhook endpoint.
:param message: The message you want to send to your Chime room.
(max 4096 characters)
"""
chime_payload = self._build_chime_payload(message)
self.run(
endpoint=self.webhook_endpoint, data=chime_payload, headers={"Content-type": "application/json"}
)
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Returns custom field behaviour to only get what is needed for Chime webhooks to function."""
return {
"hidden_fields": ["login", "port", "extra"],
"relabeling": {
"host": "Chime Webhook Endpoint",
"password": "Webhook Token",
},
"placeholders": {
"schema": "https",
"host": "hooks.chime.aws/incomingwebhook/",
"password": "T00000000?token=XXXXXXXXXXXXXXXXXXXXXXXX",
},
}
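# Illustrative usage sketch (not part of the upstream module): sends a short notification
# through a webhook. "chime_default" is a placeholder connection id that must be
# configured as described in the class docstring.
def _example_notify(text: str = "Example notification from Airflow.") -> None:
    hook = ChimeWebhookHook(chime_conn_id="chime_default")
    hook.send_message(text)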
| 4,263 | 35.444444 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/dynamodb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Amazon DynamoDB Hook."""
from __future__ import annotations
from typing import Iterable
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class DynamoDBHook(AwsBaseHook):
"""
Interact with Amazon DynamoDB.
Provide thick wrapper around
:external+boto3:py:class:`boto3.resource("dynamodb") <DynamoDB.ServiceResource>`.
:param table_keys: partition key and sort key
:param table_name: target DynamoDB table
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(
self, *args, table_keys: list | None = None, table_name: str | None = None, **kwargs
) -> None:
self.table_keys = table_keys
self.table_name = table_name
kwargs["resource_type"] = "dynamodb"
super().__init__(*args, **kwargs)
def write_batch_data(self, items: Iterable) -> bool:
"""
        Write batch items to DynamoDB table with provisioned throughput capacity.
.. seealso::
- :external+boto3:py:meth:`DynamoDB.ServiceResource.Table`
- :external+boto3:py:meth:`DynamoDB.Table.batch_writer`
- :external+boto3:py:meth:`DynamoDB.Table.put_item`
:param items: list of DynamoDB items.
"""
try:
table = self.get_conn().Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(f"Failed to insert items in dynamodb, error: {str(general_error)}")
| 2,670 | 36.097222 | 102 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/glue_crawler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.sts import StsHook
class GlueCrawlerHook(AwsBaseHook):
"""
Interacts with AWS Glue Crawler.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("glue") <Glue.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `AWS Glue crawlers and classifiers \
<https://docs.aws.amazon.com/glue/latest/dg/components-overview.html#crawling-intro>`__
"""
def __init__(self, *args, **kwargs):
kwargs["client_type"] = "glue"
super().__init__(*args, **kwargs)
@cached_property
def glue_client(self):
""":return: AWS Glue client"""
return self.get_conn()
def has_crawler(self, crawler_name) -> bool:
"""
Checks if the crawler already exists.
:param crawler_name: unique crawler name per AWS account
:return: Returns True if the crawler already exists and False if not.
"""
self.log.info("Checking if crawler already exists: %s", crawler_name)
try:
self.get_crawler(crawler_name)
return True
except self.glue_client.exceptions.EntityNotFoundException:
return False
def get_crawler(self, crawler_name: str) -> dict:
"""
Gets crawler configurations.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.get_crawler`
:param crawler_name: unique crawler name per AWS account
:return: Nested dictionary of crawler configurations
"""
return self.glue_client.get_crawler(Name=crawler_name)["Crawler"]
def update_crawler(self, **crawler_kwargs) -> bool:
"""
Updates crawler configurations.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.update_crawler`
:param crawler_kwargs: Keyword args that define the configurations used for the crawler
:return: True if crawler was updated and false otherwise
"""
crawler_name = crawler_kwargs["Name"]
current_crawler = self.get_crawler(crawler_name)
tags_updated = (
self.update_tags(crawler_name, crawler_kwargs.pop("Tags")) if "Tags" in crawler_kwargs else False
)
update_config = {
key: value
for key, value in crawler_kwargs.items()
if current_crawler.get(key, None) != crawler_kwargs.get(key)
}
if update_config:
self.log.info("Updating crawler: %s", crawler_name)
self.glue_client.update_crawler(**crawler_kwargs)
self.log.info("Updated configurations: %s", update_config)
return True
return tags_updated
def update_tags(self, crawler_name: str, crawler_tags: dict) -> bool:
"""
Updates crawler tags.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.tag_resource`
:param crawler_name: Name of the crawler for which to update tags
:param crawler_tags: Dictionary of new tags. If empty, all tags will be deleted
:return: True if tags were updated and false otherwise
"""
account_number = StsHook(aws_conn_id=self.aws_conn_id).get_account_number()
crawler_arn = (
f"arn:{self.conn_partition}:glue:{self.conn_region_name}:{account_number}:crawler/{crawler_name}"
)
current_crawler_tags: dict = self.glue_client.get_tags(ResourceArn=crawler_arn)["Tags"]
update_tags = {}
delete_tags = []
for key, value in current_crawler_tags.items():
wanted_tag_value = crawler_tags.get(key, None)
if wanted_tag_value is None:
# key is missing from new configuration, mark it for deletion
delete_tags.append(key)
elif wanted_tag_value != value:
update_tags[key] = wanted_tag_value
updated_tags = False
if update_tags:
self.log.info("Updating crawler tags: %s", crawler_name)
self.glue_client.tag_resource(ResourceArn=crawler_arn, TagsToAdd=update_tags)
self.log.info("Updated crawler tags: %s", crawler_name)
updated_tags = True
if delete_tags:
self.log.info("Deleting crawler tags: %s", crawler_name)
self.glue_client.untag_resource(ResourceArn=crawler_arn, TagsToRemove=delete_tags)
self.log.info("Deleted crawler tags: %s", crawler_name)
updated_tags = True
return updated_tags
def create_crawler(self, **crawler_kwargs) -> str:
"""
Creates an AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_crawler`
:param crawler_kwargs: Keyword args that define the configurations used to create the crawler
:return: Name of the crawler
"""
crawler_name = crawler_kwargs["Name"]
self.log.info("Creating crawler: %s", crawler_name)
return self.glue_client.create_crawler(**crawler_kwargs)
def start_crawler(self, crawler_name: str) -> dict:
"""
Triggers the AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.start_crawler`
:param crawler_name: unique crawler name per AWS account
:return: Empty dictionary
"""
self.log.info("Starting crawler %s", crawler_name)
return self.glue_client.start_crawler(Name=crawler_name)
def wait_for_crawler_completion(self, crawler_name: str, poll_interval: int = 5) -> str:
"""
Wait until Glue crawler completes; returns the status of the latest crawl or raises AirflowException.
:param crawler_name: unique crawler name per AWS account
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status
:return: Crawler's status
"""
self.get_waiter("crawler_ready").wait(Name=crawler_name, WaiterConfig={"Delay": poll_interval})
# query one extra time to log some info
crawler = self.get_crawler(crawler_name)
self.log.info("crawler_config: %s", crawler)
crawler_status = crawler["LastCrawl"]["Status"]
metrics_response = self.glue_client.get_crawler_metrics(CrawlerNameList=[crawler_name])
metrics = metrics_response["CrawlerMetricsList"][0]
self.log.info("Status: %s", crawler_status)
self.log.info("Last Runtime Duration (seconds): %s", metrics["LastRuntimeSeconds"])
self.log.info("Median Runtime Duration (seconds): %s", metrics["MedianRuntimeSeconds"])
self.log.info("Tables Created: %s", metrics["TablesCreated"])
self.log.info("Tables Updated: %s", metrics["TablesUpdated"])
self.log.info("Tables Deleted: %s", metrics["TablesDeleted"])
return crawler_status
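# Illustrative usage sketch (not part of the upstream module): creates or reconciles a
# crawler, starts it and waits for the crawl to finish. The crawler name, role ARN,
# database and S3 path are placeholders and assume suitable IAM permissions.
def _example_run_crawler() -> str:
    config = {
        "Name": "example-crawler",
        "Role": "arn:aws:iam::123456789012:role/example-glue-role",
        "DatabaseName": "example_db",
        "Targets": {"S3Targets": [{"Path": "s3://example-bucket/raw/"}]},
    }
    hook = GlueCrawlerHook(aws_conn_id="aws_default")
    if hook.has_crawler(config["Name"]):
        hook.update_crawler(**config)
    else:
        hook.create_crawler(**config)
    hook.start_crawler(config["Name"])
    # Returns the status of the finished crawl, e.g. "SUCCEEDED".
    return hook.wait_for_crawler_completion(config["Name"], poll_interval=10)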
| 7,931 | 39.060606 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/secrets_manager.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import json
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class SecretsManagerHook(AwsBaseHook):
"""Interact with Amazon SecretsManager Service.
Provide thin wrapper around
:external+boto3:py:class:`boto3.client("secretsmanager") <SecretsManager.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="secretsmanager", *args, **kwargs)
def get_secret(self, secret_name: str) -> str | bytes:
"""Retrieve secret value from AWS Secrets Manager as a str or bytes.
        The returned value reflects the format in which the secret is stored in AWS Secrets Manager.
.. seealso::
- :external+boto3:py:meth:`SecretsManager.Client.get_secret_value`
:param secret_name: name of the secrets.
:return: Union[str, bytes] with the information about the secrets
"""
# Depending on whether the secret is a string or binary, one of
# these fields will be populated.
get_secret_value_response = self.get_conn().get_secret_value(SecretId=secret_name)
if "SecretString" in get_secret_value_response:
secret = get_secret_value_response["SecretString"]
else:
secret = base64.b64decode(get_secret_value_response["SecretBinary"])
return secret
def get_secret_as_dict(self, secret_name: str) -> dict:
"""Retrieve secret value from AWS Secrets Manager as a dict.
:param secret_name: name of the secrets.
:return: dict with the information about the secrets
"""
return json.loads(self.get_secret(secret_name))
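# Illustrative usage sketch (not part of the upstream module): reads a secret stored as a
# JSON string and extracts one field. The secret name and field are placeholders.
def _example_read_password(secret_name: str = "example/db-credentials") -> str:
    hook = SecretsManagerHook(aws_conn_id="aws_default")
    credentials = hook.get_secret_as_dict(secret_name=secret_name)
    return credentials["password"]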
| 2,667 | 37.666667 | 90 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/eks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Interact with Amazon EKS, using the boto3 library."""
from __future__ import annotations
import base64
import json
import sys
import tempfile
from contextlib import contextmanager
from enum import Enum
from functools import partial
from typing import Callable, Generator
from botocore.exceptions import ClientError
from botocore.signers import RequestSigner
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils import yaml
from airflow.utils.json import AirflowJsonEncoder
DEFAULT_PAGINATION_TOKEN = ""
STS_TOKEN_EXPIRES_IN = 60
AUTHENTICATION_API_VERSION = "client.authentication.k8s.io/v1alpha1"
_POD_USERNAME = "aws"
_CONTEXT_NAME = "aws"
class ClusterStates(Enum):
"""Contains the possible State values of an EKS Cluster."""
CREATING = "CREATING"
ACTIVE = "ACTIVE"
DELETING = "DELETING"
FAILED = "FAILED"
UPDATING = "UPDATING"
NONEXISTENT = "NONEXISTENT"
class FargateProfileStates(Enum):
"""Contains the possible State values of an AWS Fargate profile."""
CREATING = "CREATING"
ACTIVE = "ACTIVE"
DELETING = "DELETING"
CREATE_FAILED = "CREATE_FAILED"
DELETE_FAILED = "DELETE_FAILED"
NONEXISTENT = "NONEXISTENT"
class NodegroupStates(Enum):
"""Contains the possible State values of an EKS Managed Nodegroup."""
CREATING = "CREATING"
ACTIVE = "ACTIVE"
UPDATING = "UPDATING"
DELETING = "DELETING"
CREATE_FAILED = "CREATE_FAILED"
DELETE_FAILED = "DELETE_FAILED"
DEGRADED = "DEGRADED"
NONEXISTENT = "NONEXISTENT"
class EksHook(AwsBaseHook):
"""
Interact with Amazon Elastic Kubernetes Service (EKS).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("eks") <EKS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
client_type = "eks"
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = self.client_type
super().__init__(*args, **kwargs)
def create_cluster(
self,
name: str,
roleArn: str,
resourcesVpcConfig: dict,
**kwargs,
) -> dict:
"""
Creates an Amazon EKS control plane.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_cluster`
:param name: The unique name to give to your Amazon EKS Cluster.
:param roleArn: The Amazon Resource Name (ARN) of the IAM role that provides permissions
for the Kubernetes control plane to make calls to AWS API operations on your behalf.
:param resourcesVpcConfig: The VPC configuration used by the cluster control plane.
:return: Returns descriptive information about the created EKS Cluster.
"""
eks_client = self.conn
response = eks_client.create_cluster(
name=name, roleArn=roleArn, resourcesVpcConfig=resourcesVpcConfig, **kwargs
)
self.log.info("Created Amazon EKS cluster with the name %s.", response.get("cluster").get("name"))
return response
def create_nodegroup(
self,
clusterName: str,
nodegroupName: str,
subnets: list[str],
nodeRole: str | None,
*,
tags: dict | None = None,
**kwargs,
) -> dict:
"""
Creates an Amazon EKS managed node group for an Amazon EKS Cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_nodegroup`
:param clusterName: The name of the Amazon EKS cluster to create the EKS Managed Nodegroup in.
:param nodegroupName: The unique name to give your managed nodegroup.
:param subnets: The subnets to use for the Auto Scaling group that is created for your nodegroup.
:param nodeRole: The Amazon Resource Name (ARN) of the IAM role to associate with your nodegroup.
:param tags: Optional tags to apply to your nodegroup.
:return: Returns descriptive information about the created EKS Managed Nodegroup.
"""
eks_client = self.conn
# The below tag is mandatory and must have a value of either 'owned' or 'shared'
# A value of 'owned' denotes that the subnets are exclusive to the nodegroup.
# The 'shared' value allows more than one resource to use the subnet.
cluster_tag_key = f"kubernetes.io/cluster/{clusterName}"
resolved_tags = tags or {}
if cluster_tag_key not in resolved_tags:
resolved_tags[cluster_tag_key] = "owned"
response = eks_client.create_nodegroup(
clusterName=clusterName,
nodegroupName=nodegroupName,
subnets=subnets,
nodeRole=nodeRole,
tags=resolved_tags,
**kwargs,
)
self.log.info(
"Created an Amazon EKS managed node group named %s in Amazon EKS cluster %s",
response.get("nodegroup").get("nodegroupName"),
response.get("nodegroup").get("clusterName"),
)
return response
def create_fargate_profile(
self,
clusterName: str,
fargateProfileName: str | None,
podExecutionRoleArn: str | None,
selectors: list,
**kwargs,
) -> dict:
"""
Creates an AWS Fargate profile for an Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster to apply the Fargate profile to.
:param fargateProfileName: The name of the Fargate profile.
:param podExecutionRoleArn: The Amazon Resource Name (ARN) of the pod execution role to
use for pods that match the selectors in the Fargate profile.
:param selectors: The selectors to match for pods to use this Fargate profile.
:return: Returns descriptive information about the created Fargate profile.
"""
eks_client = self.conn
response = eks_client.create_fargate_profile(
clusterName=clusterName,
fargateProfileName=fargateProfileName,
podExecutionRoleArn=podExecutionRoleArn,
selectors=selectors,
**kwargs,
)
self.log.info(
"Created AWS Fargate profile with the name %s for Amazon EKS cluster %s.",
response.get("fargateProfile").get("fargateProfileName"),
response.get("fargateProfile").get("clusterName"),
)
return response
def delete_cluster(self, name: str) -> dict:
"""
Deletes the Amazon EKS Cluster control plane.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_cluster`
:param name: The name of the cluster to delete.
:return: Returns descriptive information about the deleted EKS Cluster.
"""
eks_client = self.conn
response = eks_client.delete_cluster(name=name)
self.log.info("Deleted Amazon EKS cluster with the name %s.", response.get("cluster").get("name"))
return response
def delete_nodegroup(self, clusterName: str, nodegroupName: str) -> dict:
"""
Deletes an Amazon EKS managed node group from a specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_nodegroup`
:param clusterName: The name of the Amazon EKS Cluster that is associated with your nodegroup.
:param nodegroupName: The name of the nodegroup to delete.
:return: Returns descriptive information about the deleted EKS Managed Nodegroup.
"""
eks_client = self.conn
response = eks_client.delete_nodegroup(clusterName=clusterName, nodegroupName=nodegroupName)
self.log.info(
"Deleted Amazon EKS managed node group named %s from Amazon EKS cluster %s.",
response.get("nodegroup").get("nodegroupName"),
response.get("nodegroup").get("clusterName"),
)
return response
def delete_fargate_profile(self, clusterName: str, fargateProfileName: str) -> dict:
"""
Deletes an AWS Fargate profile from a specified Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster associated with the Fargate profile to delete.
:param fargateProfileName: The name of the Fargate profile to delete.
:return: Returns descriptive information about the deleted Fargate profile.
"""
eks_client = self.conn
response = eks_client.delete_fargate_profile(
clusterName=clusterName, fargateProfileName=fargateProfileName
)
self.log.info(
"Deleted AWS Fargate profile with the name %s from Amazon EKS cluster %s.",
response.get("fargateProfile").get("fargateProfileName"),
response.get("fargateProfile").get("clusterName"),
)
return response
def describe_cluster(self, name: str, verbose: bool = False) -> dict:
"""
Returns descriptive information about an Amazon EKS Cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_cluster`
:param name: The name of the cluster to describe.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: Returns descriptive information about a specific EKS Cluster.
"""
eks_client = self.conn
response = eks_client.describe_cluster(name=name)
self.log.info(
"Retrieved details for Amazon EKS cluster named %s.", response.get("cluster").get("name")
)
if verbose:
cluster_data = response.get("cluster")
self.log.info("Amazon EKS cluster details: %s", json.dumps(cluster_data, cls=AirflowJsonEncoder))
return response
def describe_nodegroup(self, clusterName: str, nodegroupName: str, verbose: bool = False) -> dict:
"""
Returns descriptive information about an Amazon EKS managed node group.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_nodegroup`
:param clusterName: The name of the Amazon EKS Cluster associated with the nodegroup.
:param nodegroupName: The name of the nodegroup to describe.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: Returns descriptive information about a specific EKS Nodegroup.
"""
eks_client = self.conn
response = eks_client.describe_nodegroup(clusterName=clusterName, nodegroupName=nodegroupName)
self.log.info(
"Retrieved details for Amazon EKS managed node group named %s in Amazon EKS cluster %s.",
response.get("nodegroup").get("nodegroupName"),
response.get("nodegroup").get("clusterName"),
)
if verbose:
nodegroup_data = response.get("nodegroup")
self.log.info(
"Amazon EKS managed node group details: %s",
json.dumps(nodegroup_data, cls=AirflowJsonEncoder),
)
return response
def describe_fargate_profile(
self, clusterName: str, fargateProfileName: str, verbose: bool = False
) -> dict:
"""
Returns descriptive information about an AWS Fargate profile.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_fargate_profile`
:param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile.
:param fargateProfileName: The name of the Fargate profile to describe.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: Returns descriptive information about an AWS Fargate profile.
"""
eks_client = self.conn
response = eks_client.describe_fargate_profile(
clusterName=clusterName, fargateProfileName=fargateProfileName
)
self.log.info(
"Retrieved details for AWS Fargate profile named %s in Amazon EKS cluster %s.",
response.get("fargateProfile").get("fargateProfileName"),
response.get("fargateProfile").get("clusterName"),
)
if verbose:
fargate_profile_data = response.get("fargateProfile")
self.log.info(
"AWS Fargate profile details: %s", json.dumps(fargate_profile_data, cls=AirflowJsonEncoder)
)
return response
def get_cluster_state(self, clusterName: str) -> ClusterStates:
"""
Returns the current status of a given Amazon EKS Cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_cluster`
:param clusterName: The name of the cluster to check.
:return: Returns the current status of a given Amazon EKS Cluster.
"""
eks_client = self.conn
try:
return ClusterStates(eks_client.describe_cluster(name=clusterName).get("cluster").get("status"))
except ClientError as ex:
if ex.response.get("Error").get("Code") == "ResourceNotFoundException":
return ClusterStates.NONEXISTENT
raise
def get_fargate_profile_state(self, clusterName: str, fargateProfileName: str) -> FargateProfileStates:
"""
Returns the current status of a given AWS Fargate profile.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_fargate_profile`
:param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile.
:param fargateProfileName: The name of the Fargate profile to check.
:return: Returns the current status of a given AWS Fargate profile.
"""
eks_client = self.conn
try:
return FargateProfileStates(
eks_client.describe_fargate_profile(
clusterName=clusterName, fargateProfileName=fargateProfileName
)
.get("fargateProfile")
.get("status")
)
except ClientError as ex:
if ex.response.get("Error").get("Code") == "ResourceNotFoundException":
return FargateProfileStates.NONEXISTENT
raise
def get_nodegroup_state(self, clusterName: str, nodegroupName: str) -> NodegroupStates:
"""
Returns the current status of a given Amazon EKS managed node group.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_nodegroup`
:param clusterName: The name of the Amazon EKS Cluster associated with the nodegroup.
:param nodegroupName: The name of the nodegroup to check.
:return: Returns the current status of a given Amazon EKS Nodegroup.
"""
eks_client = self.conn
try:
return NodegroupStates(
eks_client.describe_nodegroup(clusterName=clusterName, nodegroupName=nodegroupName)
.get("nodegroup")
.get("status")
)
except ClientError as ex:
if ex.response.get("Error").get("Code") == "ResourceNotFoundException":
return NodegroupStates.NONEXISTENT
raise
def list_clusters(
self,
verbose: bool = False,
) -> list:
"""
Lists all Amazon EKS Clusters in your AWS account.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.list_clusters`
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A List containing the cluster names.
"""
eks_client = self.conn
list_cluster_call = partial(eks_client.list_clusters)
return self._list_all(api_call=list_cluster_call, response_key="clusters", verbose=verbose)
def list_nodegroups(
self,
clusterName: str,
verbose: bool = False,
) -> list:
"""
Lists all Amazon EKS managed node groups associated with the specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.list_nodegroups`
:param clusterName: The name of the Amazon EKS Cluster containing nodegroups to list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A List of nodegroup names within the given cluster.
"""
eks_client = self.conn
list_nodegroups_call = partial(eks_client.list_nodegroups, clusterName=clusterName)
return self._list_all(api_call=list_nodegroups_call, response_key="nodegroups", verbose=verbose)
def list_fargate_profiles(
self,
clusterName: str,
verbose: bool = False,
) -> list:
"""
Lists all AWS Fargate profiles associated with the specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.list_fargate_profiles`
:param clusterName: The name of the Amazon EKS Cluster containing Fargate profiles to list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A list of Fargate profile names within a given cluster.
"""
eks_client = self.conn
list_fargate_profiles_call = partial(eks_client.list_fargate_profiles, clusterName=clusterName)
return self._list_all(
api_call=list_fargate_profiles_call, response_key="fargateProfileNames", verbose=verbose
)
def _list_all(self, api_call: Callable, response_key: str, verbose: bool) -> list:
"""
Repeatedly calls a provided boto3 API Callable and collates the responses into a List.
:param api_call: The api command to execute.
:param response_key: Which dict key to collect into the final list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A List of the combined results of the provided API call.
"""
name_collection: list = []
token = DEFAULT_PAGINATION_TOKEN
while token is not None:
response = api_call(nextToken=token)
# If response list is not empty, append it to the running list.
name_collection += filter(None, response.get(response_key))
token = response.get("nextToken")
self.log.info("Retrieved list of %s %s.", len(name_collection), response_key)
if verbose:
self.log.info("%s found: %s", response_key.title(), name_collection)
return name_collection
@contextmanager
def generate_config_file(
self,
eks_cluster_name: str,
pod_namespace: str | None,
) -> Generator[str, None, None]:
"""
Writes the kubeconfig file given an EKS Cluster.
:param eks_cluster_name: The name of the cluster to generate kubeconfig file for.
:param pod_namespace: The namespace to run within kubernetes.
"""
# Set up the client
eks_client = self.conn
# Get cluster details
cluster = eks_client.describe_cluster(name=eks_cluster_name)
cluster_cert = cluster["cluster"]["certificateAuthority"]["data"]
cluster_ep = cluster["cluster"]["endpoint"]
cluster_config = {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"cluster": {"server": cluster_ep, "certificate-authority-data": cluster_cert},
"name": eks_cluster_name,
}
],
"contexts": [
{
"context": {
"cluster": eks_cluster_name,
"namespace": pod_namespace,
"user": _POD_USERNAME,
},
"name": _CONTEXT_NAME,
}
],
"current-context": _CONTEXT_NAME,
"preferences": {},
"users": [
{
"name": _POD_USERNAME,
"user": {
"exec": {
"apiVersion": AUTHENTICATION_API_VERSION,
"command": sys.executable,
"args": [
"-m",
"airflow.providers.amazon.aws.utils.eks_get_token",
*(
["--region-name", self.region_name]
if self.region_name is not None
else []
),
*(
["--aws-conn-id", self.aws_conn_id]
if self.aws_conn_id is not None
else []
),
"--cluster-name",
eks_cluster_name,
],
"env": [
{
"name": "AIRFLOW__LOGGING__LOGGING_LEVEL",
"value": "FATAL",
}
],
"interactiveMode": "Never",
}
},
}
],
}
config_text = yaml.dump(cluster_config, default_flow_style=False)
with tempfile.NamedTemporaryFile(mode="w") as config_file:
config_file.write(config_text)
config_file.flush()
yield config_file.name
    def fetch_access_token_for_cluster(self, eks_cluster_name: str) -> str:
        """
        Generate a short-lived token for authenticating against the given EKS cluster.
        The token is the base64-encoded, presigned STS ``GetCallerIdentity`` URL expected by
        the Kubernetes AWS IAM authenticator flow, prefixed with ``k8s-aws-v1.``.
        :param eks_cluster_name: The name of the cluster to generate the token for.
        """
session = self.get_session()
service_id = self.conn.meta.service_model.service_id
sts_url = (
f"https://sts.{session.region_name}.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15"
)
signer = RequestSigner(
service_id=service_id,
region_name=session.region_name,
signing_name="sts",
signature_version="v4",
credentials=session.get_credentials(),
event_emitter=session.events,
)
request_params = {
"method": "GET",
"url": sts_url,
"body": {},
"headers": {"x-k8s-aws-id": eks_cluster_name},
"context": {},
}
signed_url = signer.generate_presigned_url(
request_dict=request_params,
region_name=session.region_name,
expires_in=STS_TOKEN_EXPIRES_IN,
operation_name="",
)
base64_url = base64.urlsafe_b64encode(signed_url.encode("utf-8")).decode("utf-8")
# remove any base64 encoding padding:
return "k8s-aws-v1." + base64_url.rstrip("=")
| 23,958 | 36.088235 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/rds.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Interact with AWS RDS."""
from __future__ import annotations
import time
from typing import TYPE_CHECKING, Callable
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.utils.waiter_with_logging import wait
if TYPE_CHECKING:
from mypy_boto3_rds import RDSClient # noqa
class RdsHook(AwsGenericHook["RDSClient"]):
"""
Interact with Amazon Relational Database Service (RDS).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("rds") <RDS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon RDS and Aurora Documentation \
<https://docs.aws.amazon.com/rds/index.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "rds"
super().__init__(*args, **kwargs)
def get_db_snapshot_state(self, snapshot_id: str) -> str:
"""
Get the current state of a DB instance snapshot.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:return: Returns the status of the DB snapshot as a string (eg. "available")
:raises AirflowNotFoundException: If the DB instance snapshot does not exist.
"""
try:
response = self.conn.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "DBSnapshotNotFound":
raise AirflowNotFoundException(e)
raise e
return response["DBSnapshots"][0]["Status"].lower()
def wait_for_db_snapshot_state(
self, snapshot_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Snapshots until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_snapshot_state(snapshot_id)
target_state = target_state.lower()
if target_state in ("available", "deleted", "completed"):
waiter = self.conn.get_waiter(f"db_snapshot_{target_state}") # type: ignore
waiter.wait(
DBSnapshotIdentifier=snapshot_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB snapshot '%s' reached the '%s' state", snapshot_id, target_state)
def get_db_cluster_snapshot_state(self, snapshot_id: str) -> str:
"""
Get the current state of a DB cluster snapshot.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_cluster_snapshots`
:param snapshot_id: The ID of the target DB cluster.
:return: Returns the status of the DB cluster snapshot as a string (eg. "available")
:raises AirflowNotFoundException: If the DB cluster snapshot does not exist.
"""
try:
response = self.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "DBClusterSnapshotNotFoundFault":
raise AirflowNotFoundException(e)
raise e
return response["DBClusterSnapshots"][0]["Status"].lower()
def wait_for_db_cluster_snapshot_state(
self, snapshot_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
        Poll DB Cluster Snapshots until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_cluster_snapshots`
:param snapshot_id: The ID of the target DB cluster snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_cluster_snapshot_state(snapshot_id)
target_state = target_state.lower()
if target_state in ("available", "deleted"):
waiter = self.conn.get_waiter(f"db_cluster_snapshot_{target_state}") # type: ignore
waiter.wait(
DBClusterSnapshotIdentifier=snapshot_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster snapshot '%s' reached the '%s' state", snapshot_id, target_state)
def get_export_task_state(self, export_task_id: str) -> str:
"""
Gets the current state of an RDS snapshot export to Amazon S3.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_export_tasks`
:param export_task_id: The identifier of the target snapshot export task.
:return: Returns the status of the snapshot export task as a string (eg. "canceled")
:raises AirflowNotFoundException: If the export task does not exist.
"""
try:
response = self.conn.describe_export_tasks(ExportTaskIdentifier=export_task_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ExportTaskNotFoundFault":
raise AirflowNotFoundException(e)
raise e
return response["ExportTasks"][0]["Status"].lower()
def wait_for_export_task_state(
self, export_task_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll export tasks until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_export_tasks`
:param export_task_id: The identifier of the target snapshot export task.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_export_task_state(export_task_id)
target_state = target_state.lower()
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("export task '%s' reached the '%s' state", export_task_id, target_state)
def get_event_subscription_state(self, subscription_name: str) -> str:
"""
        Gets the current state of an RDS event notification subscription.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:return: Returns the status of the event subscription as a string (eg. "active")
:raises AirflowNotFoundException: If the event subscription does not exist.
"""
try:
response = self.conn.describe_event_subscriptions(SubscriptionName=subscription_name)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "SubscriptionNotFoundFault":
raise AirflowNotFoundException(e)
raise e
return response["EventSubscriptionsList"][0]["Status"].lower()
def wait_for_event_subscription_state(
self, subscription_name: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll Event Subscriptions until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_event_subscription_state(subscription_name)
target_state = target_state.lower()
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("event subscription '%s' reached the '%s' state", subscription_name, target_state)
def get_db_instance_state(self, db_instance_id: str) -> str:
"""
Get the current state of a DB instance.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:return: Returns the status of the DB instance as a string (eg. "available")
:raises AirflowNotFoundException: If the DB instance does not exist.
"""
try:
response = self.conn.describe_db_instances(DBInstanceIdentifier=db_instance_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "DBInstanceNotFoundFault":
raise AirflowNotFoundException(e)
raise e
return response["DBInstances"][0]["DBInstanceStatus"].lower()
def wait_for_db_instance_state(
self, db_instance_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Instances until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_instance_state(db_instance_id)
target_state = target_state.lower()
if target_state in ("available", "deleted"):
waiter = self.conn.get_waiter(f"db_instance_{target_state}") # type: ignore
wait(
waiter=waiter,
waiter_delay=check_interval,
waiter_max_attempts=max_attempts,
args={"DBInstanceIdentifier": db_instance_id},
failure_message=f"Rdb DB instance failed to reach state {target_state}",
status_message="Rds DB instance state is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster snapshot '%s' reached the '%s' state", db_instance_id, target_state)
def get_db_cluster_state(self, db_cluster_id: str) -> str:
"""
Get the current state of a DB cluster.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_clusters`
:param db_cluster_id: The ID of the target DB cluster.
:return: Returns the status of the DB cluster as a string (eg. "available")
:raises AirflowNotFoundException: If the DB cluster does not exist.
"""
try:
response = self.conn.describe_db_clusters(DBClusterIdentifier=db_cluster_id)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "DBClusterNotFoundFault":
raise AirflowNotFoundException(e)
raise e
return response["DBClusters"][0]["Status"].lower()
def wait_for_db_cluster_state(
self, db_cluster_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Clusters until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_clusters`
:param db_cluster_id: The ID of the target DB cluster.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_cluster_state(db_cluster_id)
target_state = target_state.lower()
if target_state in ("available", "deleted"):
waiter = self.conn.get_waiter(f"db_cluster_{target_state}") # type: ignore
waiter.wait(
DBClusterIdentifier=db_cluster_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB cluster snapshot '%s' reached the '%s' state", db_cluster_id, target_state)
def _wait_for_state(
self,
poke: Callable[..., str],
target_state: str,
check_interval: int,
max_attempts: int,
) -> None:
"""
Polls the poke function for the current state until it reaches the target_state.
:param poke: A function that returns the current state of the target resource as a string.
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
state = poke()
tries = 1
while state != target_state:
self.log.info("Current state is %s", state)
if tries >= max_attempts:
raise AirflowException("Max attempts exceeded")
time.sleep(check_interval)
state = poke()
tries += 1
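# Illustrative usage sketch (not part of the upstream module): waits for a DB instance to
# become available. The instance identifier and connection id are placeholders.
def _example_wait_until_available(db_instance_id: str = "example-db-instance") -> None:
    hook = RdsHook(aws_conn_id="aws_default")
    if hook.get_db_instance_state(db_instance_id) != "available":
        hook.wait_for_db_instance_state(
            db_instance_id, target_state="available", check_interval=30, max_attempts=40
        )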
| 15,479 | 42.728814 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/step_function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class StepFunctionHook(AwsBaseHook):
"""
Interact with an AWS Step Functions State Machine.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("stepfunctions") <SFN.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "stepfunctions"
super().__init__(*args, **kwargs)
def start_execution(
self,
state_machine_arn: str,
name: str | None = None,
state_machine_input: dict | str | None = None,
) -> str:
"""
Start Execution of the State Machine.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.start_execution`
:param state_machine_arn: AWS Step Function State Machine ARN.
:param name: The name of the execution.
:param state_machine_input: JSON data input to pass to the State Machine.
:return: Execution ARN.
"""
execution_args = {"stateMachineArn": state_machine_arn}
if name is not None:
execution_args["name"] = name
if state_machine_input is not None:
if isinstance(state_machine_input, str):
execution_args["input"] = state_machine_input
elif isinstance(state_machine_input, dict):
execution_args["input"] = json.dumps(state_machine_input)
self.log.info("Executing Step Function State Machine: %s", state_machine_arn)
response = self.conn.start_execution(**execution_args)
return response.get("executionArn")
def describe_execution(self, execution_arn: str) -> dict:
"""
Describes a State Machine Execution.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.describe_execution`
:param execution_arn: ARN of the State Machine Execution.
:return: Dict with execution details.
"""
return self.get_conn().describe_execution(executionArn=execution_arn)
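# Illustrative usage sketch (not part of the upstream module): starts an execution with a
# small JSON payload and reads back its status. The state machine ARN is a placeholder.
def _example_start_execution() -> str:
    hook = StepFunctionHook(aws_conn_id="aws_default")
    execution_arn = hook.start_execution(
        state_machine_arn="arn:aws:states:us-east-1:123456789012:stateMachine:example",
        state_machine_input={"key": "value"},
    )
    return hook.describe_execution(execution_arn)["status"]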
| 3,074 | 36.048193 | 102 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/sts.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class StsHook(AwsBaseHook):
"""
Interact with AWS Security Token Service (STS).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("sts") <STS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="sts", *args, **kwargs)
def get_account_number(self) -> str:
"""Get the account Number.
.. seealso::
- :external+boto3:py:meth:`STS.Client.get_caller_identity`
"""
try:
return self.get_conn().get_caller_identity()["Account"]
except Exception as general_error:
self.log.error("Failed to get the AWS Account Number, error: %s", general_error)
raise
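# Hedged usage sketch (not part of the original module): resolve the account number behind
# the credentials of an Airflow connection. The "aws_default" connection id is an
# illustrative assumption.
def _example_get_account_number() -> str:
    return StsHook(aws_conn_id="aws_default").get_account_number()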
| 1,818 | 36.122449 | 92 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/glacier.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class GlacierHook(AwsBaseHook):
"""Interact with Amazon Glacier.
This is a thin wrapper around
:external+boto3:py:class:`boto3.client("glacier") <Glacier.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, aws_conn_id: str = "aws_default") -> None:
super().__init__(client_type="glacier")
self.aws_conn_id = aws_conn_id
def retrieve_inventory(self, vault_name: str) -> dict[str, Any]:
"""Initiate an Amazon Glacier inventory-retrieval job.
.. seealso::
- :external+boto3:py:meth:`Glacier.Client.initiate_job`
        :param vault_name: the Glacier vault on which the job is executed
"""
job_params = {"Type": "inventory-retrieval"}
self.log.info("Retrieving inventory for vault: %s", vault_name)
response = self.get_conn().initiate_job(vaultName=vault_name, jobParameters=job_params)
self.log.info("Initiated inventory-retrieval job for: %s", vault_name)
self.log.info("Retrieval Job ID: %s", response["jobId"])
return response
def retrieve_inventory_results(self, vault_name: str, job_id: str) -> dict[str, Any]:
"""Retrieve the results of an Amazon Glacier inventory-retrieval job.
.. seealso::
- :external+boto3:py:meth:`Glacier.Client.get_job_output`
        :param vault_name: the Glacier vault on which the job is executed
        :param job_id: the job ID that was returned by retrieve_inventory()
"""
self.log.info("Retrieving the job results for vault: %s...", vault_name)
response = self.get_conn().get_job_output(vaultName=vault_name, jobId=job_id)
return response
def describe_job(self, vault_name: str, job_id: str) -> dict[str, Any]:
"""Retrieve the status of an Amazon S3 Glacier job.
.. seealso::
- :external+boto3:py:meth:`Glacier.Client.describe_job`
        :param vault_name: the Glacier vault on which the job is executed
        :param job_id: the job ID that was returned by retrieve_inventory()
"""
self.log.info("Retrieving status for vault: %s and job %s", vault_name, job_id)
response = self.get_conn().describe_job(vaultName=vault_name, jobId=job_id)
self.log.info("Job status: %s, code status: %s", response["Action"], response["StatusCode"])
return response
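# Hedged usage sketch (not part of the original module): kick off an inventory-retrieval
# job and check its status once. The vault name and "aws_default" connection id are
# illustrative assumptions; Glacier inventory jobs typically take hours to finish.
def _example_start_inventory_retrieval(vault_name: str) -> dict[str, Any]:
    hook = GlacierHook(aws_conn_id="aws_default")
    job = hook.retrieve_inventory(vault_name=vault_name)
    return hook.describe_job(vault_name=vault_name, job_id=job["jobId"])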
| 3,452 | 40.60241 | 100 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/appflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
if TYPE_CHECKING:
from mypy_boto3_appflow.client import AppflowClient
from mypy_boto3_appflow.type_defs import TaskTypeDef
class AppflowHook(AwsBaseHook):
"""
Interact with Amazon Appflow.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("appflow") <Appflow.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon Appflow API Reference <https://docs.aws.amazon.com/appflow/1.0/APIReference/Welcome.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "appflow"
super().__init__(*args, **kwargs)
@cached_property
def conn(self) -> AppflowClient:
"""Get the underlying boto3 Appflow client (cached)."""
return super().conn
def run_flow(self, flow_name: str, poll_interval: int = 20, wait_for_completion: bool = True) -> str:
"""
Execute an AppFlow run.
:param flow_name: The flow name
:param poll_interval: Time (seconds) to wait between two consecutive calls to check the run status
        :param wait_for_completion: whether to wait for the run to complete before returning
:return: The run execution ID
"""
response_start = self.conn.start_flow(flowName=flow_name)
execution_id = response_start["executionId"]
self.log.info("executionId: %s", execution_id)
if wait_for_completion:
self.get_waiter("run_complete", {"EXECUTION_ID": execution_id}).wait(
flowName=flow_name,
WaiterConfig={"Delay": poll_interval},
)
self._log_execution_description(flow_name, execution_id)
return execution_id
def _log_execution_description(self, flow_name: str, execution_id: str):
response_desc = self.conn.describe_flow_execution_records(flowName=flow_name)
last_execs = {fe["executionId"]: fe for fe in response_desc["flowExecutions"]}
exec_details = last_execs[execution_id]
self.log.info("Run complete, execution details: %s", exec_details)
def update_flow_filter(
self, flow_name: str, filter_tasks: list[TaskTypeDef], set_trigger_ondemand: bool = False
) -> None:
"""
Update the flow task filter; all filters will be removed if an empty array is passed to filter_tasks.
:param flow_name: The flow name
:param filter_tasks: List flow tasks to be added
:param set_trigger_ondemand: If True, set the trigger to on-demand; otherwise, keep the trigger as is
:return: None
"""
response = self.conn.describe_flow(flowName=flow_name)
connector_type = response["sourceFlowConfig"]["connectorType"]
tasks: list[TaskTypeDef] = []
# cleanup old filter tasks
for task in response["tasks"]:
if (
task["taskType"] == "Filter"
and task.get("connectorOperator", {}).get(connector_type) != "PROJECTION"
):
self.log.info("Removing task: %s", task)
else:
tasks.append(task) # List of non-filter tasks
tasks += filter_tasks # Add the new filter tasks
if set_trigger_ondemand:
# Clean up attribute to force on-demand trigger
del response["triggerConfig"]["triggerProperties"]
self.conn.update_flow(
flowName=response["flowName"],
destinationFlowConfigList=response["destinationFlowConfigList"],
sourceFlowConfig=response["sourceFlowConfig"],
triggerConfig=response["triggerConfig"],
description=response.get("description", "Flow description."),
tasks=tasks,
)
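# Hedged usage sketch (not part of the original module): drop any previously applied filter
# tasks, force an on-demand trigger, then run the flow and wait for it to complete. The flow
# name and "aws_default" connection id are illustrative assumptions.
def _example_reset_filters_and_run(flow_name: str) -> str:
    hook = AppflowHook(aws_conn_id="aws_default")
    hook.update_flow_filter(flow_name=flow_name, filter_tasks=[], set_trigger_ondemand=True)
    return hook.run_flow(flow_name=flow_name, poll_interval=30)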
| 4,835 | 39.638655 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/quicksight.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from functools import cached_property
from botocore.exceptions import ClientError
from airflow import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.sts import StsHook
class QuickSightHook(AwsBaseHook):
"""
Interact with Amazon QuickSight.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("quicksight") <QuickSight.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
NON_TERMINAL_STATES = {"INITIALIZED", "QUEUED", "RUNNING"}
FAILED_STATES = {"FAILED"}
def __init__(self, *args, **kwargs):
super().__init__(client_type="quicksight", *args, **kwargs)
@cached_property
def sts_hook(self):
return StsHook(aws_conn_id=self.aws_conn_id)
def create_ingestion(
self,
data_set_id: str,
ingestion_id: str,
ingestion_type: str,
wait_for_completion: bool = True,
check_interval: int = 30,
) -> dict:
"""
Creates and starts a new SPICE ingestion for a dataset. Refreshes the SPICE datasets.
.. seealso::
- :external+boto3:py:meth:`QuickSight.Client.create_ingestion`
:param data_set_id: ID of the dataset used in the ingestion.
:param ingestion_id: ID for the ingestion.
        :param ingestion_type: Type of ingestion: "INCREMENTAL_REFRESH"|"FULL_REFRESH"
        :param wait_for_completion: whether to keep polling until the ingestion finishes
        :param check_interval: the time interval in seconds at which the operator
            will check the status of the QuickSight Ingestion
:return: Returns descriptive information about the created data ingestion
having Ingestion ARN, HTTP status, ingestion ID and ingestion status.
"""
self.log.info("Creating QuickSight Ingestion for data set id %s.", data_set_id)
quicksight_client = self.get_conn()
try:
aws_account_id = self.sts_hook.get_account_number()
create_ingestion_response = quicksight_client.create_ingestion(
DataSetId=data_set_id,
IngestionId=ingestion_id,
IngestionType=ingestion_type,
AwsAccountId=aws_account_id,
)
if wait_for_completion:
self.wait_for_state(
aws_account_id=aws_account_id,
data_set_id=data_set_id,
ingestion_id=ingestion_id,
target_state={"COMPLETED"},
check_interval=check_interval,
)
return create_ingestion_response
except Exception as general_error:
self.log.error("Failed to run Amazon QuickSight create_ingestion API, error: %s", general_error)
raise
def get_status(self, aws_account_id: str, data_set_id: str, ingestion_id: str) -> str:
"""
Get the current status of QuickSight Create Ingestion API.
.. seealso::
- :external+boto3:py:meth:`QuickSight.Client.describe_ingestion`
:param aws_account_id: An AWS Account ID
:param data_set_id: QuickSight Data Set ID
:param ingestion_id: QuickSight Ingestion ID
        :return: A QuickSight Ingestion Status
"""
try:
describe_ingestion_response = self.get_conn().describe_ingestion(
AwsAccountId=aws_account_id, DataSetId=data_set_id, IngestionId=ingestion_id
)
return describe_ingestion_response["Ingestion"]["IngestionStatus"]
except KeyError as e:
raise AirflowException(f"Could not get status of the Amazon QuickSight Ingestion: {e}")
except ClientError as e:
raise AirflowException(f"AWS request failed: {e}")
def get_error_info(self, aws_account_id: str, data_set_id: str, ingestion_id: str) -> dict | None:
"""
Gets info about the error if any.
:param aws_account_id: An AWS Account ID
:param data_set_id: QuickSight Data Set ID
:param ingestion_id: QuickSight Ingestion ID
:return: Error info dict containing the error type (key 'Type') and message (key 'Message')
if available. Else, returns None.
"""
describe_ingestion_response = self.get_conn().describe_ingestion(
AwsAccountId=aws_account_id, DataSetId=data_set_id, IngestionId=ingestion_id
)
# using .get() to get None if the key is not present, instead of an exception.
return describe_ingestion_response["Ingestion"].get("ErrorInfo")
def wait_for_state(
self,
aws_account_id: str,
data_set_id: str,
ingestion_id: str,
target_state: set,
check_interval: int,
):
"""
Check status of a QuickSight Create Ingestion API.
:param aws_account_id: An AWS Account ID
:param data_set_id: QuickSight Data Set ID
:param ingestion_id: QuickSight Ingestion ID
:param target_state: Describes the QuickSight Job's Target State
        :param check_interval: the time interval in seconds at which the operator
            will check the status of the QuickSight Ingestion
        :return: the final QuickSight Ingestion status once a terminal or target state is reached
"""
while True:
status = self.get_status(aws_account_id, data_set_id, ingestion_id)
self.log.info("Current status is %s", status)
if status in self.FAILED_STATES:
info = self.get_error_info(aws_account_id, data_set_id, ingestion_id)
raise AirflowException(f"The Amazon QuickSight Ingestion failed. Error info: {info}")
if status == "CANCELLED":
raise AirflowException("The Amazon QuickSight SPICE ingestion cancelled!")
if status not in self.NON_TERMINAL_STATES or status == target_state:
break
time.sleep(check_interval)
self.log.info("QuickSight Ingestion completed")
return status
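# Hedged usage sketch (not part of the original module): trigger a full SPICE refresh and
# block until QuickSight reports a terminal state. The dataset/ingestion ids and the
# "aws_default" connection id are illustrative assumptions.
def _example_full_refresh(data_set_id: str, ingestion_id: str) -> dict:
    hook = QuickSightHook(aws_conn_id="aws_default")
    return hook.create_ingestion(
        data_set_id=data_set_id,
        ingestion_id=ingestion_id,
        ingestion_type="FULL_REFRESH",
        check_interval=60,
    )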
| 7,094 | 40.491228 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/elasticache_replication_group.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from time import sleep
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class ElastiCacheReplicationGroupHook(AwsBaseHook):
"""
Interact with Amazon ElastiCache.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("elasticache") <ElastiCache.Client>`.
:param max_retries: Max retries for checking availability of and deleting replication group
If this is not supplied then this is defaulted to 10
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to 1
:param initial_poke_interval: Initial sleep time in seconds
If this is not supplied then this is defaulted to 60 seconds
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
TERMINAL_STATES = frozenset({"available", "create-failed", "deleting"})
def __init__(
self,
max_retries: int = 10,
exponential_back_off_factor: float = 1,
initial_poke_interval: float = 60,
*args,
**kwargs,
):
self.max_retries = max_retries
self.exponential_back_off_factor = exponential_back_off_factor
self.initial_poke_interval = initial_poke_interval
kwargs["client_type"] = "elasticache"
super().__init__(*args, **kwargs)
def create_replication_group(self, config: dict) -> dict:
"""
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.create_replication_group`
:param config: Configuration for creating the replication group
:return: Response from ElastiCache create replication group API
"""
return self.conn.create_replication_group(**config)
def delete_replication_group(self, replication_group_id: str) -> dict:
"""
Deletes an existing replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.delete_replication_group`
:param replication_group_id: ID of replication group to delete
:return: Response from ElastiCache delete replication group API
"""
return self.conn.delete_replication_group(ReplicationGroupId=replication_group_id)
def describe_replication_group(self, replication_group_id: str) -> dict:
"""
Get information about a particular replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.describe_replication_groups`
:param replication_group_id: ID of replication group to describe
:return: Response from ElastiCache describe replication group API
"""
return self.conn.describe_replication_groups(ReplicationGroupId=replication_group_id)
def get_replication_group_status(self, replication_group_id: str) -> str:
"""
Get current status of replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.describe_replication_groups`
:param replication_group_id: ID of replication group to check for status
:return: Current status of replication group
"""
return self.describe_replication_group(replication_group_id)["ReplicationGroups"][0]["Status"]
def is_replication_group_available(self, replication_group_id: str) -> bool:
"""
Helper for checking if replication group is available or not.
:param replication_group_id: ID of replication group to check for availability
:return: True if available else False
"""
return self.get_replication_group_status(replication_group_id) == "available"
def wait_for_availability(
self,
replication_group_id: str,
initial_sleep_time: float | None = None,
exponential_back_off_factor: float | None = None,
max_retries: int | None = None,
) -> bool:
"""
Check if replication group is available or not by performing a describe over it.
:param replication_group_id: ID of replication group to check for availability
:param initial_sleep_time: Initial sleep time in seconds
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
        :return: True if the replication group is available else False
"""
sleep_time = initial_sleep_time or self.initial_poke_interval
exponential_back_off_factor = exponential_back_off_factor or self.exponential_back_off_factor
max_retries = max_retries or self.max_retries
num_tries = 0
status = "not-found"
stop_poking = False
while not stop_poking and num_tries <= max_retries:
status = self.get_replication_group_status(replication_group_id=replication_group_id)
stop_poking = status in self.TERMINAL_STATES
self.log.info(
"Current status of replication group with ID %s is %s", replication_group_id, status
)
if not stop_poking:
num_tries += 1
# No point in sleeping if all tries have exhausted
if num_tries > max_retries:
break
self.log.info("Poke retry %s. Sleep time %s seconds. Sleeping...", num_tries, sleep_time)
sleep(sleep_time)
sleep_time *= exponential_back_off_factor
if status != "available":
self.log.warning('Replication group is not available. Current status is "%s"', status)
return False
return True
def wait_for_deletion(
self,
replication_group_id: str,
initial_sleep_time: float | None = None,
exponential_back_off_factor: float | None = None,
max_retries: int | None = None,
):
"""
Helper for deleting a replication group ensuring it is either deleted or can't be deleted.
:param replication_group_id: ID of replication to delete
        :param initial_sleep_time: Initial sleep time in seconds
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: Response from ElastiCache delete replication group API and flag to identify if deleted or not
"""
deleted = False
sleep_time = initial_sleep_time or self.initial_poke_interval
exponential_back_off_factor = exponential_back_off_factor or self.exponential_back_off_factor
max_retries = max_retries or self.max_retries
num_tries = 0
response = None
while not deleted and num_tries <= max_retries:
try:
status = self.get_replication_group_status(replication_group_id=replication_group_id)
self.log.info(
"Current status of replication group with ID %s is %s", replication_group_id, status
)
# Can only delete if status is `available`
# Status becomes `deleting` after this call so this will only run once
if status == "available":
self.log.info("Initiating delete and then wait for it to finish")
response = self.delete_replication_group(replication_group_id=replication_group_id)
except self.conn.exceptions.ReplicationGroupNotFoundFault:
self.log.info("Replication group with ID '%s' does not exist", replication_group_id)
deleted = True
# This should never occur as we only issue a delete request when status is `available`
# which is a valid status for deletion. Still handling for safety.
except self.conn.exceptions.InvalidReplicationGroupStateFault as exp:
# status Error Response
# creating - Cache cluster <cluster_id> is not in a valid state to be deleted.
# deleting - Replication group <replication_group_id> has status deleting which is not valid
# for deletion.
                # modifying - Replication group <replication_group_id> has status modifying which is not valid
# for deletion.
message = exp.response["Error"]["Message"]
self.log.warning("Received error message from AWS ElastiCache API : %s", message)
if not deleted:
num_tries += 1
# No point in sleeping if all tries have exhausted
if num_tries > max_retries:
break
self.log.info("Poke retry %s. Sleep time %s seconds. Sleeping...", num_tries, sleep_time)
sleep(sleep_time)
sleep_time *= exponential_back_off_factor
return response, deleted
def ensure_delete_replication_group(
self,
replication_group_id: str,
initial_sleep_time: float | None = None,
exponential_back_off_factor: float | None = None,
max_retries: int | None = None,
) -> dict:
"""
Delete a replication group ensuring it is either deleted or can't be deleted.
:param replication_group_id: ID of replication to delete
        :param initial_sleep_time: Initial sleep time in seconds
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: Response from ElastiCache delete replication group API
:raises AirflowException: If replication group is not deleted
"""
self.log.info("Deleting replication group with ID %s", replication_group_id)
response, deleted = self.wait_for_deletion(
replication_group_id=replication_group_id,
initial_sleep_time=initial_sleep_time,
exponential_back_off_factor=exponential_back_off_factor,
max_retries=max_retries,
)
if not deleted:
raise AirflowException(f'Replication group could not be deleted. Response "{response}"')
return response
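# Hedged usage sketch (not part of the original module): create a minimal single-node Redis
# replication group and wait for it to become available. The group id and node settings are
# illustrative assumptions; adjust them to your account limits and needs.
def _example_create_and_wait(replication_group_id: str) -> bool:
    hook = ElastiCacheReplicationGroupHook(aws_conn_id="aws_default")
    hook.create_replication_group(
        config={
            "ReplicationGroupId": replication_group_id,
            "ReplicationGroupDescription": "example replication group",
            "Engine": "redis",
            "CacheNodeType": "cache.t3.micro",
            "NumCacheClusters": 1,
        }
    )
    return hook.wait_for_availability(replication_group_id)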
| 12,119 | 41.978723 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/ecr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import logging
from dataclasses import dataclass
from datetime import datetime
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.log.secrets_masker import mask_secret
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class EcrCredentials:
"""Helper (frozen dataclass) for storing temporary ECR credentials."""
username: str
password: str
proxy_endpoint: str
expires_at: datetime
def __post_init__(self):
mask_secret(self.password)
logger.debug("Credentials to Amazon ECR %r expires at %s.", self.proxy_endpoint, self.expires_at)
@property
def registry(self) -> str:
"""Return registry in appropriate `docker login` format."""
# https://github.com/docker/docker-py/issues/2256#issuecomment-824940506
return self.proxy_endpoint.replace("https://", "")
class EcrHook(AwsBaseHook):
"""
Interact with Amazon Elastic Container Registry (ECR).
Provide thin wrapper around :external+boto3:py:class:`boto3.client("ecr") <ECR.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, **kwargs):
kwargs["client_type"] = "ecr"
super().__init__(**kwargs)
def get_temporary_credentials(self, registry_ids: list[str] | str | None = None) -> list[EcrCredentials]:
"""Get temporary credentials for Amazon ECR.
.. seealso::
- :external+boto3:py:meth:`ECR.Client.get_authorization_token`
:param registry_ids: Either AWS Account ID or list of AWS Account IDs that are associated
with the registries from which credentials are obtained. If you do not specify a registry,
the default registry is assumed.
:return: list of :class:`airflow.providers.amazon.aws.hooks.ecr.EcrCredentials`,
obtained credentials valid for 12 hours.
"""
registry_ids = registry_ids or None
if isinstance(registry_ids, str):
registry_ids = [registry_ids]
if registry_ids:
response = self.conn.get_authorization_token(registryIds=registry_ids)
else:
response = self.conn.get_authorization_token()
creds = []
for auth_data in response["authorizationData"]:
username, password = base64.b64decode(auth_data["authorizationToken"]).decode("utf-8").split(":")
creds.append(
EcrCredentials(
username=username,
password=password,
proxy_endpoint=auth_data["proxyEndpoint"],
expires_at=auth_data["expiresAt"],
)
)
return creds
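# Hedged usage sketch (not part of the original module): fetch temporary credentials for the
# default registry and shape them for a `docker login` call. The "aws_default" connection id
# is an illustrative assumption.
def _example_docker_login_args() -> dict[str, str]:
    creds = EcrHook(aws_conn_id="aws_default").get_temporary_credentials()[0]
    # e.g. docker login --username <username> --password <password> <registry>
    return {"username": creds.username, "password": creds.password, "registry": creds.registry}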
| 3,703 | 35.313725 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/sns.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS SNS hook."""
from __future__ import annotations
import json
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
def _get_message_attribute(o):
if isinstance(o, bytes):
return {"DataType": "Binary", "BinaryValue": o}
if isinstance(o, str):
return {"DataType": "String", "StringValue": o}
if isinstance(o, (int, float)):
return {"DataType": "Number", "StringValue": str(o)}
if hasattr(o, "__iter__"):
return {"DataType": "String.Array", "StringValue": json.dumps(o)}
raise TypeError(
f"Values in MessageAttributes must be one of bytes, str, int, float, or iterable; got {type(o)}"
)
class SnsHook(AwsBaseHook):
"""
Interact with Amazon Simple Notification Service.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("sns") <SNS.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type="sns", *args, **kwargs)
def publish_to_target(
self,
target_arn: str,
message: str,
subject: str | None = None,
message_attributes: dict | None = None,
):
"""
        Publish a message to an SNS topic or an endpoint.
.. seealso::
- :external+boto3:py:meth:`SNS.Client.publish`
:param target_arn: either a TopicArn or an EndpointArn
        :param message: the default message you want to send
:param subject: subject of message
:param message_attributes: additional attributes to publish for message filtering. This should be
a flat dict; the DataType to be sent depends on the type of the value:
- bytes = Binary
- str = String
- int, float = Number
- iterable = String.Array
"""
publish_kwargs: dict[str, str | dict] = {
"TargetArn": target_arn,
"MessageStructure": "json",
"Message": json.dumps({"default": message}),
}
# Construct args this way because boto3 distinguishes from missing args and those set to None
if subject:
publish_kwargs["Subject"] = subject
if message_attributes:
publish_kwargs["MessageAttributes"] = {
key: _get_message_attribute(val) for key, val in message_attributes.items()
}
return self.get_conn().publish(**publish_kwargs)
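# Hedged usage sketch (not part of the original module): publish a message with typed
# attributes to a topic. The topic ARN and "aws_default" connection id are illustrative
# assumptions; the attribute DataTypes are derived by _get_message_attribute above.
def _example_publish(topic_arn: str) -> dict:
    hook = SnsHook(aws_conn_id="aws_default")
    return hook.publish_to_target(
        target_arn=topic_arn,
        message="Task finished",
        subject="Airflow notification",
        message_attributes={"priority": 1, "tags": ["aws", "sns"]},
    )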
| 3,463 | 34.71134 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/logs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Generator
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
# Guidance received from the AWS team regarding the correct way to check for the end of a stream is that the
# value of the nextForwardToken is the same in subsequent calls.
# The issue with this approach is, it can take a huge amount of time (e.g. 20 seconds) to retrieve logs using
# this approach. As an intermediate solution, we decided to stop fetching logs if 3 consecutive responses
# are empty.
# See PR https://github.com/apache/airflow/pull/20814
NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD = 3
class AwsLogsHook(AwsBaseHook):
"""
Interact with Amazon CloudWatch Logs.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("logs") <CloudWatchLogs.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "logs"
super().__init__(*args, **kwargs)
class ContinuationToken:
"""Just a wrapper around a str token to allow updating it from the caller."""
def __init__(self):
self.value: str | None = None
def get_log_events(
self,
log_group: str,
log_stream_name: str,
start_time: int = 0,
skip: int = 0,
start_from_head: bool | None = None,
continuation_token: ContinuationToken | None = None,
) -> Generator:
"""
A generator for log items in a single stream; yields all items available at the current moment.
.. seealso::
- :external+boto3:py:meth:`CloudWatchLogs.Client.get_log_events`
:param log_group: The name of the log group.
:param log_stream_name: The name of the specific stream.
:param start_time: The time stamp value to start reading the logs from (default: 0).
:param skip: The number of log entries to skip at the start (default: 0).
This is for when there are multiple entries at the same timestamp.
        :param start_from_head: Deprecated. Do not set this to False; logs would be retrieved out of order.
If possible, retrieve logs in one query, or implement pagination yourself.
:param continuation_token: a token indicating where to read logs from.
Will be updated as this method reads new logs, to be reused in subsequent calls.
:return: | A CloudWatch log event with the following key-value pairs:
| 'timestamp' (int): The time in milliseconds of the event.
| 'message' (str): The log event data.
| 'ingestionTime' (int): The time in milliseconds the event was ingested.
"""
if start_from_head is not None:
message = (
"start_from_head is deprecated, please remove this parameter."
if start_from_head
else "Do not use this method with start_from_head=False, logs will be returned out of order. "
"If possible, retrieve logs in one query, or implement pagination yourself."
)
warnings.warn(
message,
AirflowProviderDeprecationWarning,
stacklevel=2,
)
else:
start_from_head = True
if continuation_token is None:
continuation_token = AwsLogsHook.ContinuationToken()
num_consecutive_empty_response = 0
while True:
if continuation_token.value is not None:
token_arg: dict[str, str] = {"nextToken": continuation_token.value}
else:
token_arg = {}
response = self.conn.get_log_events(
logGroupName=log_group,
logStreamName=log_stream_name,
startTime=start_time,
startFromHead=start_from_head,
**token_arg,
)
events = response["events"]
event_count = len(events)
if event_count > skip:
events = events[skip:]
skip = 0
else:
skip -= event_count
events = []
yield from events
if continuation_token.value == response["nextForwardToken"]:
return
if not event_count:
num_consecutive_empty_response += 1
if num_consecutive_empty_response >= NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD:
# Exit if there are more than NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD consecutive
# empty responses
return
else:
num_consecutive_empty_response = 0
continuation_token.value = response["nextForwardToken"]
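# Hedged usage sketch (not part of the original module): read a stream in two passes,
# reusing a ContinuationToken so the second pass only yields events appended after the
# first one. The log group/stream names and connection id are illustrative assumptions.
def _example_tail_stream(log_group: str, log_stream_name: str) -> list[str]:
    hook = AwsLogsHook(aws_conn_id="aws_default")
    token = AwsLogsHook.ContinuationToken()
    first_pass = [e["message"] for e in hook.get_log_events(log_group, log_stream_name, continuation_token=token)]
    # The token now holds the last nextForwardToken; a later call resumes from there.
    second_pass = [e["message"] for e in hook.get_log_events(log_group, log_stream_name, continuation_token=token)]
    return first_pass + second_pass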
| 5,883 | 39.57931 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Interact with AWS S3, using the boto3 library."""
from __future__ import annotations
import asyncio
import fnmatch
import gzip as gz
import io
import logging
import os
import re
import shutil
import warnings
from contextlib import suppress
from copy import deepcopy
from datetime import datetime
from functools import wraps
from inspect import signature
from io import BytesIO
from pathlib import Path
from tempfile import NamedTemporaryFile, gettempdir
from time import sleep
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast
from urllib.parse import urlsplit
from uuid import uuid4
if TYPE_CHECKING:
try:
from aiobotocore.client import AioBaseClient
except ImportError:
pass
from asgiref.sync import sync_to_async
from boto3.s3.transfer import TransferConfig
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.exceptions import S3HookUriParseFailure
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.utils.tags import format_tags
from airflow.utils.helpers import chunks
if TYPE_CHECKING:
from mypy_boto3_s3.service_resource import Object as S3ResourceObject
T = TypeVar("T", bound=Callable)
logger = logging.getLogger(__name__)
def provide_bucket_name(func: T) -> T:
"""Provide a bucket name taken from the connection if no bucket name has been passed to the function."""
if hasattr(func, "_unify_bucket_name_and_key_wrapped"):
logger.warning("`unify_bucket_name_and_key` should wrap `provide_bucket_name`.")
function_signature = signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> T:
bound_args = function_signature.bind(*args, **kwargs)
if "bucket_name" not in bound_args.arguments:
self = args[0]
if "bucket_name" in self.service_config:
bound_args.arguments["bucket_name"] = self.service_config["bucket_name"]
elif self.conn_config and self.conn_config.schema:
warnings.warn(
"s3 conn_type, and the associated schema field, is deprecated. "
"Please use aws conn_type instead, and specify `bucket_name` "
"in `service_config.s3` within `extras`.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
bound_args.arguments["bucket_name"] = self.conn_config.schema
return func(*bound_args.args, **bound_args.kwargs)
return cast(T, wrapper)
def provide_bucket_name_async(func: T) -> T:
"""Provide a bucket name taken from the connection if no bucket name has been passed to the function."""
function_signature = signature(func)
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
bound_args = function_signature.bind(*args, **kwargs)
if "bucket_name" not in bound_args.arguments:
self = args[0]
if self.aws_conn_id:
connection = await sync_to_async(self.get_connection)(self.aws_conn_id)
if connection.schema:
bound_args.arguments["bucket_name"] = connection.schema
return await func(*bound_args.args, **bound_args.kwargs)
return cast(T, wrapper)
def unify_bucket_name_and_key(func: T) -> T:
"""Unify bucket name and key in case no bucket name and at least a key has been passed to the function."""
function_signature = signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> T:
bound_args = function_signature.bind(*args, **kwargs)
if "wildcard_key" in bound_args.arguments:
key_name = "wildcard_key"
elif "key" in bound_args.arguments:
key_name = "key"
else:
raise ValueError("Missing key parameter!")
if "bucket_name" not in bound_args.arguments:
with suppress(S3HookUriParseFailure):
bound_args.arguments["bucket_name"], bound_args.arguments[key_name] = S3Hook.parse_s3_url(
bound_args.arguments[key_name]
)
return func(*bound_args.args, **bound_args.kwargs)
# set attr _unify_bucket_name_and_key_wrapped so that we can check at
# class definition that unify is the first decorator applied
# if provide_bucket_name is applied first, and there's a bucket defined in conn
# then if user supplies full key, bucket in key is not respected
wrapper._unify_bucket_name_and_key_wrapped = True # type: ignore[attr-defined]
return cast(T, wrapper)
class S3Hook(AwsBaseHook):
"""
Interact with Amazon Simple Storage Service (S3).
Provide thick wrapper around :external+boto3:py:class:`boto3.client("s3") <S3.Client>`
and :external+boto3:py:class:`boto3.resource("s3") <S3.ServiceResource>`.
:param transfer_config_args: Configuration object for managed S3 transfers.
:param extra_args: Extra arguments that may be passed to the download/upload operations.
.. seealso::
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#s3-transfers
- For allowed upload extra arguments see ``boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS``.
- For allowed download extra arguments see ``boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS``.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(
self,
aws_conn_id: str | None = AwsBaseHook.default_conn_name,
transfer_config_args: dict | None = None,
extra_args: dict | None = None,
*args,
**kwargs,
) -> None:
kwargs["client_type"] = "s3"
kwargs["aws_conn_id"] = aws_conn_id
if transfer_config_args and not isinstance(transfer_config_args, dict):
raise TypeError(f"transfer_config_args expected dict, got {type(transfer_config_args).__name__}.")
self.transfer_config = TransferConfig(**transfer_config_args or {})
if extra_args and not isinstance(extra_args, dict):
raise TypeError(f"extra_args expected dict, got {type(extra_args).__name__}.")
self._extra_args = extra_args or {}
super().__init__(*args, **kwargs)
@property
def extra_args(self):
"""Return hook's extra arguments (immutable)."""
return deepcopy(self._extra_args)
@staticmethod
def parse_s3_url(s3url: str) -> tuple[str, str]:
"""
Parses the S3 Url into a bucket name and key.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html
for valid url formats.
:param s3url: The S3 Url to parse.
:return: the parsed bucket name and key
"""
valid_s3_format = "S3://bucket-name/key-name"
valid_s3_virtual_hosted_format = "https://bucket-name.s3.region-code.amazonaws.com/key-name"
format = s3url.split("//")
if re.match(r"s3[na]?:", format[0], re.IGNORECASE):
parsed_url = urlsplit(s3url)
if not parsed_url.netloc:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid format of the form: "
f'{valid_s3_format} or {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
bucket_name = parsed_url.netloc
key = parsed_url.path.lstrip("/")
elif format[0] == "https:":
temp_split = format[1].split(".")
if temp_split[0] == "s3":
split_url = format[1].split("/")
bucket_name = split_url[1]
key = "/".join(split_url[2:])
elif temp_split[1] == "s3":
bucket_name = temp_split[0]
key = "/".join(format[1].split("/")[1:])
else:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid virtually hosted format which should "
f'be of the form: {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
else:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid format of the form: "
f'{valid_s3_format} or {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
return bucket_name, key
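    # Hedged illustration (not part of the original module) of the accepted formats,
    # following the parsing rules above:
    #   parse_s3_url("s3://my-bucket/path/to/key")                        -> ("my-bucket", "path/to/key")
    #   parse_s3_url("https://my-bucket.s3.us-east-1.amazonaws.com/key")  -> ("my-bucket", "key")
    #   parse_s3_url("https://s3.us-east-1.amazonaws.com/my-bucket/key")  -> ("my-bucket", "key")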
@staticmethod
def get_s3_bucket_key(
bucket: str | None, key: str, bucket_param_name: str, key_param_name: str
) -> tuple[str, str]:
"""
Get the S3 bucket name and key.
From either:
- bucket name and key. Return the info as it is after checking `key` is a relative path.
- key. Must be a full s3:// url.
:param bucket: The S3 bucket name
:param key: The S3 key
:param bucket_param_name: The parameter name containing the bucket name
:param key_param_name: The parameter name containing the key name
:return: the parsed bucket name and key
"""
if bucket is None:
return S3Hook.parse_s3_url(key)
parsed_url = urlsplit(key)
if parsed_url.scheme != "" or parsed_url.netloc != "":
raise TypeError(
f"If `{bucket_param_name}` is provided, {key_param_name} should be a relative path "
"from root level, rather than a full s3:// url"
)
return bucket, key
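    # Hedged illustration (not part of the original module) of the two call styles above:
    #   get_s3_bucket_key("my-bucket", "path/to/key", "bucket", "key")          -> ("my-bucket", "path/to/key")
    #   get_s3_bucket_key(None, "s3://my-bucket/path/to/key", "bucket", "key")  -> ("my-bucket", "path/to/key")
    #   get_s3_bucket_key("my-bucket", "s3://other-bucket/key", "bucket", "key") raises TypeError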
@provide_bucket_name
def check_for_bucket(self, bucket_name: str | None = None) -> bool:
"""
Check if bucket_name exists.
.. seealso::
- :external+boto3:py:meth:`S3.Client.head_bucket`
:param bucket_name: the name of the bucket
:return: True if it exists and False if not.
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except ClientError as e:
# The head_bucket api is odd in that it cannot return proper
# exception objects, so error codes must be used. Only 200, 404 and 403
# are ever returned. See the following links for more details:
# https://github.com/boto/boto3/issues/2499
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_bucket
return_code = int(e.response["Error"]["Code"])
if return_code == 404:
self.log.info('Bucket "%s" does not exist', bucket_name)
elif return_code == 403:
self.log.error(
'Access to bucket "%s" is forbidden or there was an error with the request', bucket_name
)
self.log.error(e)
return False
@provide_bucket_name
def get_bucket(self, bucket_name: str | None = None) -> object:
"""
Returns a :py:class:`S3.Bucket` object.
.. seealso::
- :external+boto3:py:meth:`S3.ServiceResource.Bucket`
:param bucket_name: the name of the bucket
:return: the bucket object to the bucket name.
"""
s3_resource = self.get_session().resource(
"s3",
endpoint_url=self.conn_config.endpoint_url,
config=self.config,
verify=self.verify,
)
return s3_resource.Bucket(bucket_name)
@provide_bucket_name
def create_bucket(self, bucket_name: str | None = None, region_name: str | None = None) -> None:
"""
Creates an Amazon S3 bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.create_bucket`
:param bucket_name: The name of the bucket
:param region_name: The name of the aws region in which to create the bucket.
"""
if not region_name:
if self.conn_region_name == "aws-global":
raise AirflowException(
"Unable to create bucket if `region_name` not set "
"and boto3 configured to use s3 regional endpoints."
)
region_name = self.conn_region_name
if region_name == "us-east-1":
self.get_conn().create_bucket(Bucket=bucket_name)
else:
self.get_conn().create_bucket(
Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region_name}
)
@provide_bucket_name
def check_for_prefix(self, prefix: str, delimiter: str, bucket_name: str | None = None) -> bool:
"""
Checks that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(rf"(\w+[{delimiter}])$", prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return prefix in plist
@provide_bucket_name
def list_prefixes(
self,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list:
"""
Lists prefixes in a bucket under prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched prefixes
"""
prefix = prefix or ""
delimiter = delimiter or ""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
response = paginator.paginate(
Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config
)
prefixes: list[str] = []
for page in response:
if "CommonPrefixes" in page:
prefixes.extend(common_prefix["Prefix"] for common_prefix in page["CommonPrefixes"])
return prefixes
@provide_bucket_name_async
@unify_bucket_name_and_key
async def get_head_object_async(
self, client: AioBaseClient, key: str, bucket_name: str | None = None
) -> dict[str, Any] | None:
"""
Retrieves metadata of an object.
:param client: aiobotocore client
:param bucket_name: Name of the bucket in which the file is stored
:param key: S3 key that will point to the file
"""
head_object_val: dict[str, Any] | None = None
try:
head_object_val = await client.head_object(Bucket=bucket_name, Key=key)
return head_object_val
except ClientError as e:
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return head_object_val
else:
raise e
async def list_prefixes_async(
self,
client: AioBaseClient,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list[Any]:
"""
Lists prefixes in a bucket under prefix.
:param client: ClientCreatorContext
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched prefixes
"""
prefix = prefix or ""
delimiter = delimiter or ""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = client.get_paginator("list_objects_v2")
response = paginator.paginate(
Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config
)
prefixes = []
async for page in response:
if "CommonPrefixes" in page:
for common_prefix in page["CommonPrefixes"]:
prefixes.append(common_prefix["Prefix"])
return prefixes
@provide_bucket_name_async
async def get_file_metadata_async(self, client: AioBaseClient, bucket_name: str, key: str) -> list[Any]:
"""
        Get metadata for files in the bucket whose keys share the non-wildcard prefix of ``key``, asynchronously.
:param client: aiobotocore client
:param bucket_name: the name of the bucket
:param key: the path to the key
"""
prefix = re.split(r"[\[\*\?]", key, 1)[0]
delimiter = ""
paginator = client.get_paginator("list_objects_v2")
response = paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter)
files = []
async for page in response:
if "Contents" in page:
files += page["Contents"]
return files
async def _check_key_async(
self,
client: AioBaseClient,
bucket_val: str,
wildcard_match: bool,
key: str,
) -> bool:
"""
        Check whether a key exists, either via a wildcard listing or a head-object call.
        If wildcard_match is True, list the files in the bucket asynchronously and return
        True if any key matches the wildcard expression. If wildcard_match is False, fetch
        the head object for the key and return True if it exists.
        :param client: aiobotocore client
        :param bucket_val: the name of the bucket
        :param key: S3 key that will point to the file
        :param wildcard_match: whether the key should be interpreted as a Unix wildcard pattern
"""
bucket_name, key = self.get_s3_bucket_key(bucket_val, key, "bucket_name", "bucket_key")
if wildcard_match:
keys = await self.get_file_metadata_async(client, bucket_name, key)
key_matches = [k for k in keys if fnmatch.fnmatch(k["Key"], key)]
if len(key_matches) == 0:
return False
else:
obj = await self.get_head_object_async(client, key, bucket_name)
if obj is None:
return False
return True
async def check_key_async(
self,
client: AioBaseClient,
bucket: str,
bucket_keys: str | list[str],
wildcard_match: bool,
) -> bool:
"""
        Check that all the given keys exist in the bucket and return a boolean.
        :param client: aiobotocore client
        :param bucket: the name of the bucket
        :param bucket_keys: S3 key(s) that will point to the file(s)
        :param wildcard_match: whether the keys should be interpreted as Unix wildcard patterns
"""
if isinstance(bucket_keys, list):
return all(
await asyncio.gather(
*(self._check_key_async(client, bucket, wildcard_match, key) for key in bucket_keys)
)
)
return await self._check_key_async(client, bucket, wildcard_match, bucket_keys)
async def check_for_prefix_async(
self, client: AioBaseClient, prefix: str, delimiter: str, bucket_name: str | None = None
) -> bool:
"""
Checks that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(rf"(\w+[{delimiter}])$", prefix, 1)
previous_level = prefix_split[0]
plist = await self.list_prefixes_async(client, bucket_name, previous_level, delimiter)
return prefix in plist
async def _check_for_prefix_async(
self, client: AioBaseClient, prefix: str, delimiter: str, bucket_name: str | None = None
) -> bool:
return await self.check_for_prefix_async(
client, prefix=prefix, delimiter=delimiter, bucket_name=bucket_name
)
async def get_files_async(
self,
client: AioBaseClient,
bucket: str,
bucket_keys: str | list[str],
wildcard_match: bool,
delimiter: str | None = "/",
) -> list[Any]:
"""Gets a list of files in the bucket."""
keys: list[Any] = []
for key in bucket_keys:
prefix = key
if wildcard_match:
prefix = re.split(r"[\[\*\?]", key, 1)[0]
paginator = client.get_paginator("list_objects_v2")
response = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter=delimiter)
async for page in response:
if "Contents" in page:
_temp = [k for k in page["Contents"] if isinstance(k.get("Size", None), (int, float))]
keys = keys + _temp
return keys
@staticmethod
async def _list_keys_async(
client: AioBaseClient,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list[str]:
"""
Lists keys in a bucket under prefix and not containing delimiter.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched keys
"""
prefix = prefix or ""
delimiter = delimiter or ""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = client.get_paginator("list_objects_v2")
response = paginator.paginate(
Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config
)
keys = []
async for page in response:
if "Contents" in page:
for k in page["Contents"]:
keys.append(k["Key"])
return keys
def _list_key_object_filter(
self, keys: list, from_datetime: datetime | None = None, to_datetime: datetime | None = None
) -> list:
def _is_in_period(input_date: datetime) -> bool:
if from_datetime is not None and input_date <= from_datetime:
return False
if to_datetime is not None and input_date > to_datetime:
return False
return True
return [k["Key"] for k in keys if _is_in_period(k["LastModified"])]
async def is_keys_unchanged_async(
self,
client: AioBaseClient,
bucket_name: str,
prefix: str,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: set[str] | None = None,
inactivity_seconds: int = 0,
allow_delete: bool = True,
last_activity_time: datetime | None = None,
) -> dict[str, Any]:
"""
Check if new objects have been uploaded and the period has passed; update sensor state accordingly.
:param client: aiobotocore client
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param inactivity_period: the total seconds of inactivity to designate
keys unchanged. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:param min_objects: the minimum number of objects needed for keys unchanged
sensor to be considered valid.
:param previous_objects: the set of object ids found during the last poke.
:param inactivity_seconds: number of inactive seconds
:param allow_delete: Should this sensor consider objects being deleted
between pokes valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:param last_activity_time: last activity datetime.
"""
if not previous_objects:
previous_objects = set()
list_keys = await self._list_keys_async(client=client, bucket_name=bucket_name, prefix=prefix)
current_objects = set(list_keys)
current_num_objects = len(current_objects)
if current_num_objects > len(previous_objects):
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s, resetting last_activity_time.",
os.path.join(bucket_name, prefix),
)
self.log.debug("New objects: %s", current_objects - previous_objects)
last_activity_time = datetime.now()
inactivity_seconds = 0
previous_objects = current_objects
return {
"status": "pending",
"previous_objects": previous_objects,
"last_activity_time": last_activity_time,
"inactivity_seconds": inactivity_seconds,
}
if len(previous_objects) - len(current_objects):
# During the last poke interval objects were deleted.
if allow_delete:
deleted_objects = previous_objects - current_objects
previous_objects = current_objects
last_activity_time = datetime.now()
self.log.info(
"Objects were deleted during the last poke interval. Updating the "
"file counter and resetting last_activity_time:\n%s",
deleted_objects,
)
return {
"status": "pending",
"previous_objects": previous_objects,
"last_activity_time": last_activity_time,
"inactivity_seconds": inactivity_seconds,
}
return {
"status": "error",
"message": f"{os.path.join(bucket_name, prefix)} between pokes.",
}
if last_activity_time:
inactivity_seconds = int((datetime.now() - last_activity_time).total_seconds())
else:
# Handles the first poke where last inactivity time is None.
last_activity_time = datetime.now()
inactivity_seconds = 0
if inactivity_seconds >= inactivity_period:
path = os.path.join(bucket_name, prefix)
if current_num_objects >= min_objects:
                success_message = (
                    f"SUCCESS: Sensor found {current_num_objects} objects at {path}. "
                    f"Waited at least {inactivity_period} seconds, with no new objects uploaded."
                )
self.log.info(success_message)
return {
"status": "success",
"message": success_message,
}
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return {
"status": "error",
"message": f"FAILURE: Inactivity Period passed, not enough objects found in {path}",
}
return {
"status": "pending",
"previous_objects": previous_objects,
"last_activity_time": last_activity_time,
"inactivity_seconds": inactivity_seconds,
}
@provide_bucket_name
def list_keys(
self,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
start_after_key: str | None = None,
from_datetime: datetime | None = None,
to_datetime: datetime | None = None,
object_filter: Callable[..., list] | None = None,
apply_wildcard: bool = False,
) -> list:
"""
Lists keys in a bucket under prefix and not containing delimiter.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:param start_after_key: should return only keys greater than this key
        :param from_datetime: should return only keys with LastModified attr greater than this
            from_datetime
:param to_datetime: should return only keys with LastModified attr less than this to_datetime
:param object_filter: Function that receives the list of the S3 objects, from_datetime and
to_datetime and returns the List of matched key.
:param apply_wildcard: whether to treat '*' as a wildcard or a plain symbol in the prefix.
**Example**: Returns the list of S3 object with LastModified attr greater than from_datetime
and less than to_datetime:
.. code-block:: python
def object_filter(
keys: list,
from_datetime: datetime | None = None,
to_datetime: datetime | None = None,
) -> list:
def _is_in_period(input_date: datetime) -> bool:
if from_datetime is not None and input_date < from_datetime:
return False
if to_datetime is not None and input_date > to_datetime:
return False
return True
return [k["Key"] for k in keys if _is_in_period(k["LastModified"])]
:return: a list of matched keys
"""
_original_prefix = prefix or ""
_apply_wildcard = bool(apply_wildcard and "*" in _original_prefix)
_prefix = _original_prefix.split("*", 1)[0] if _apply_wildcard else _original_prefix
delimiter = delimiter or ""
start_after_key = start_after_key or ""
self.object_filter_usr = object_filter
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
response = paginator.paginate(
Bucket=bucket_name,
Prefix=_prefix,
Delimiter=delimiter,
PaginationConfig=config,
StartAfter=start_after_key,
)
keys: list[str] = []
for page in response:
if "Contents" in page:
new_keys = page["Contents"]
if _apply_wildcard:
new_keys = (k for k in new_keys if fnmatch.fnmatch(k["Key"], _original_prefix))
keys.extend(new_keys)
if self.object_filter_usr is not None:
return self.object_filter_usr(keys, from_datetime, to_datetime)
return self._list_key_object_filter(keys, from_datetime, to_datetime)
@provide_bucket_name
def get_file_metadata(
self,
prefix: str,
bucket_name: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list:
"""
Lists metadata objects in a bucket under prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param prefix: a key prefix
:param bucket_name: the name of the bucket
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of metadata of objects
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
response = paginator.paginate(Bucket=bucket_name, Prefix=prefix, PaginationConfig=config)
files = []
for page in response:
if "Contents" in page:
files += page["Contents"]
return files
@unify_bucket_name_and_key
@provide_bucket_name
def head_object(self, key: str, bucket_name: str | None = None) -> dict | None:
"""
Retrieves metadata of an object.
.. seealso::
- :external+boto3:py:meth:`S3.Client.head_object`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: metadata of an object
"""
try:
return self.get_conn().head_object(Bucket=bucket_name, Key=key)
except ClientError as e:
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return None
else:
raise e
@unify_bucket_name_and_key
@provide_bucket_name
def check_for_key(self, key: str, bucket_name: str | None = None) -> bool:
"""
Checks if a key exists in a bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.head_object`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: True if the key exists and False if not.
"""
obj = self.head_object(key, bucket_name)
return obj is not None
@unify_bucket_name_and_key
@provide_bucket_name
def get_key(self, key: str, bucket_name: str | None = None) -> S3ResourceObject:
"""
Returns a :py:class:`S3.Object`.
.. seealso::
- :external+boto3:py:meth:`S3.ServiceResource.Object`
:param key: the path to the key
:param bucket_name: the name of the bucket
:return: the key object from the bucket
"""
s3_resource = self.get_session().resource(
"s3",
endpoint_url=self.conn_config.endpoint_url,
config=self.config,
verify=self.verify,
)
obj = s3_resource.Object(bucket_name, key)
obj.load()
return obj
@unify_bucket_name_and_key
@provide_bucket_name
def read_key(self, key: str, bucket_name: str | None = None) -> str:
"""
Reads a key from S3.
.. seealso::
- :external+boto3:py:meth:`S3.Object.get`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: the content of the key
"""
obj = self.get_key(key, bucket_name)
return obj.get()["Body"].read().decode("utf-8")
@unify_bucket_name_and_key
@provide_bucket_name
def select_key(
self,
key: str,
bucket_name: str | None = None,
expression: str | None = None,
expression_type: str | None = None,
input_serialization: dict[str, Any] | None = None,
output_serialization: dict[str, Any] | None = None,
) -> str:
"""
Reads a key with S3 Select.
.. seealso::
- :external+boto3:py:meth:`S3.Client.select_object_content`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:param expression: S3 Select expression
:param expression_type: S3 Select expression type
:param input_serialization: S3 Select input data serialization format
:param output_serialization: S3 Select output data serialization format
:return: retrieved subset of original data by S3 Select
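        **Example** (illustrative sketch only; the bucket, key and expression below are
            placeholders, and the default CSV serialization is assumed):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                subset = hook.select_key(
                    key="data/input.csv",
                    bucket_name="example-bucket",
                    expression="SELECT * FROM S3Object s LIMIT 10",
                )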
"""
expression = expression or "SELECT * FROM S3Object"
expression_type = expression_type or "SQL"
if input_serialization is None:
input_serialization = {"CSV": {}}
if output_serialization is None:
output_serialization = {"CSV": {}}
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization,
)
return b"".join(
event["Records"]["Payload"] for event in response["Payload"] if "Records" in event
).decode("utf-8")
@unify_bucket_name_and_key
@provide_bucket_name
def check_for_wildcard_key(
self, wildcard_key: str, bucket_name: str | None = None, delimiter: str = ""
) -> bool:
"""
Checks that a key matching a wildcard expression exists in a bucket.
:param wildcard_key: the path to the key
:param bucket_name: the name of the bucket
:param delimiter: the delimiter marks key hierarchy
:return: True if a key exists and False if not.
"""
return (
self.get_wildcard_key(wildcard_key=wildcard_key, bucket_name=bucket_name, delimiter=delimiter)
is not None
)
@unify_bucket_name_and_key
@provide_bucket_name
def get_wildcard_key(
self, wildcard_key: str, bucket_name: str | None = None, delimiter: str = ""
) -> S3ResourceObject | None:
"""
Returns a boto3.s3.Object object matching the wildcard expression.
:param wildcard_key: the path to the key
:param bucket_name: the name of the bucket
:param delimiter: the delimiter marks key hierarchy
:return: the key object from the bucket or None if none has been found.
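        **Example** (illustrative sketch only; the bucket name and wildcard pattern are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                obj = hook.get_wildcard_key("data/2023-*/report.csv", bucket_name="example-bucket")
                if obj is not None:
                    print(obj.key)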
"""
prefix = re.split(r"[\[\*\?]", wildcard_key, 1)[0]
key_list = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
key_matches = [k for k in key_list if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
return None
@unify_bucket_name_and_key
@provide_bucket_name
def load_file(
self,
filename: Path | str,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
) -> None:
"""
Loads a local file to S3.
.. seealso::
- :external+boto3:py:meth:`S3.Client.upload_file`
:param filename: path to the file to load.
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
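        **Example** (illustrative sketch only; the local path, bucket and key are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.load_file(
                    filename="/tmp/report.csv",
                    key="reports/report.csv",
                    bucket_name="example-bucket",
                    replace=True,
                    gzip=True,
                )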
"""
filename = str(filename)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if gzip:
with open(filename, "rb") as f_in:
filename_gz = f"{f_in.name}.gz"
with gz.open(filename_gz, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args, Config=self.transfer_config)
@unify_bucket_name_and_key
@provide_bucket_name
def load_string(
self,
string_data: str,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
encoding: str | None = None,
acl_policy: str | None = None,
compression: str | None = None,
) -> None:
"""
Loads a string to S3.
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
.. seealso::
- :external+boto3:py:meth:`S3.Client.upload_fileobj`
:param string_data: str to set as content for the key.
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param encoding: The string to byte encoding
:param acl_policy: The string to specify the canned ACL policy for the
object to be uploaded
:param compression: Type of compression to use, currently only gzip is supported.
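        **Example** (illustrative sketch only; the bucket and key are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.load_string(
                    string_data="hello world",
                    key="data/sample.txt",
                    bucket_name="example-bucket",
                    replace=True,
                    compression="gzip",
                )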
"""
encoding = encoding or "utf-8"
bytes_data = string_data.encode(encoding)
# Compress string
available_compressions = ["gzip"]
if compression is not None and compression not in available_compressions:
raise NotImplementedError(
f"Received {compression} compression type. "
f"String can currently be compressed in {available_compressions} only."
)
if compression == "gzip":
bytes_data = gz.compress(bytes_data)
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
file_obj.close()
@unify_bucket_name_and_key
@provide_bucket_name
def load_bytes(
self,
bytes_data: bytes,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
) -> None:
"""
Loads bytes to S3.
This is provided as a convenience to drop bytes data into S3. It uses the
boto infrastructure to ship a file to s3.
.. seealso::
- :external+boto3:py:meth:`S3.Client.upload_fileobj`
:param bytes_data: bytes to set as content for the key.
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param acl_policy: The string to specify the canned ACL policy for the
object to be uploaded
"""
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
file_obj.close()
@unify_bucket_name_and_key
@provide_bucket_name
def load_file_obj(
self,
file_obj: BytesIO,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
) -> None:
"""
Loads a file object to S3.
.. seealso::
- :external+boto3:py:meth:`S3.Client.upload_fileobj`
:param file_obj: The file-like object to set as the content for the S3 key.
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag that indicates whether to overwrite the key
if it already exists.
:param encrypt: If True, S3 encrypts the file on the server,
and the file is stored in encrypted form at rest in S3.
:param acl_policy: The string to specify the canned ACL policy for the
object to be uploaded
"""
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
def _upload_file_obj(
self,
file_obj: BytesIO,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
) -> None:
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_fileobj(
file_obj,
bucket_name,
key,
ExtraArgs=extra_args,
Config=self.transfer_config,
)
def copy_object(
self,
source_bucket_key: str,
dest_bucket_key: str,
source_bucket_name: str | None = None,
dest_bucket_name: str | None = None,
source_version_id: str | None = None,
acl_policy: str | None = None,
) -> None:
"""
Creates a copy of an object that is already stored in S3.
.. seealso::
- :external+boto3:py:meth:`S3.Client.copy_object`
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
:param source_bucket_key: The key of the source object.
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:param dest_bucket_key: The key of the object to copy to.
The convention to specify `dest_bucket_key` is the same
as `source_bucket_key`.
:param source_bucket_name: Name of the S3 bucket where the source object is in.
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:param dest_bucket_name: Name of the S3 bucket to where the object is copied.
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:param source_version_id: Version ID of the source object (OPTIONAL)
:param acl_policy: The string to specify the canned ACL policy for the
object to be copied which is private by default.
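        **Example** (illustrative sketch only; both s3:// URLs are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.copy_object(
                    source_bucket_key="s3://source-bucket/data/file.csv",
                    dest_bucket_key="s3://dest-bucket/archive/file.csv",
                )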
"""
acl_policy = acl_policy or "private"
dest_bucket_name, dest_bucket_key = self.get_s3_bucket_key(
dest_bucket_name, dest_bucket_key, "dest_bucket_name", "dest_bucket_key"
)
source_bucket_name, source_bucket_key = self.get_s3_bucket_key(
source_bucket_name, source_bucket_key, "source_bucket_name", "source_bucket_key"
)
copy_source = {"Bucket": source_bucket_name, "Key": source_bucket_key, "VersionId": source_version_id}
response = self.get_conn().copy_object(
Bucket=dest_bucket_name, Key=dest_bucket_key, CopySource=copy_source, ACL=acl_policy
)
return response
@provide_bucket_name
def delete_bucket(self, bucket_name: str, force_delete: bool = False, max_retries: int = 5) -> None:
"""
        Delete an S3 bucket; if ``force_delete`` is enabled, all objects in the bucket are deleted first.
.. seealso::
- :external+boto3:py:meth:`S3.Client.delete_bucket`
:param bucket_name: Bucket name
:param force_delete: Enable this to delete bucket even if not empty
:param max_retries: A bucket must be empty to be deleted. If force_delete is true,
then retries may help prevent a race condition between deleting objects in the
bucket and trying to delete the bucket.
:return: None
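        **Example** (illustrative sketch only; the bucket name is a placeholder):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.delete_bucket(bucket_name="example-bucket", force_delete=True, max_retries=3)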
"""
tries_remaining = max_retries + 1
if force_delete:
while tries_remaining:
bucket_keys = self.list_keys(bucket_name=bucket_name)
if not bucket_keys:
break
if tries_remaining <= max_retries:
                    # Wait between retries (skipped on the first iteration).
sleep(500)
self.delete_objects(bucket=bucket_name, keys=bucket_keys)
tries_remaining -= 1
self.conn.delete_bucket(Bucket=bucket_name)
def delete_objects(self, bucket: str, keys: str | list) -> None:
"""
Delete keys from the bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.delete_objects`
:param bucket: Name of the bucket in which you are going to delete object(s)
:param keys: The key(s) to delete from S3 bucket.
When ``keys`` is a string, it's supposed to be the key name of
the single object to delete.
When ``keys`` is a list, it's supposed to be the list of the
keys to delete.
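        **Example** (illustrative sketch only; the bucket and keys are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.delete_objects(bucket="example-bucket", keys=["data/a.csv", "data/b.csv"])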
"""
if isinstance(keys, str):
keys = [keys]
s3 = self.get_conn()
# We can only send a maximum of 1000 keys per request.
# For details see:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_objects
for chunk in chunks(keys, chunk_size=1000):
response = s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": k} for k in chunk]})
deleted_keys = [x["Key"] for x in response.get("Deleted", [])]
self.log.info("Deleted: %s", deleted_keys)
if "Errors" in response:
errors_keys = [x["Key"] for x in response.get("Errors", [])]
raise AirflowException(f"Errors when deleting: {errors_keys}")
@unify_bucket_name_and_key
@provide_bucket_name
def download_file(
self,
key: str,
bucket_name: str | None = None,
local_path: str | None = None,
preserve_file_name: bool = False,
use_autogenerated_subdir: bool = True,
) -> str:
"""
Downloads a file from the S3 location to the local file system.
.. seealso::
- :external+boto3:py:meth:`S3.Object.download_fileobj`
:param key: The key path in S3.
:param bucket_name: The specific bucket to use.
:param local_path: The local path to the downloaded file. If no path is provided it will use the
system's temporary directory.
:param preserve_file_name: If you want the downloaded file name to be the same name as it is in S3,
set this parameter to True. When set to False, a random filename will be generated.
Default: False.
:param use_autogenerated_subdir: Pairs with 'preserve_file_name = True' to download the file into a
random generated folder inside the 'local_path', useful to avoid collisions between various tasks
that might download the same file name. Set it to 'False' if you don't want it, and you want a
predictable path.
Default: True.
:return: the file name.
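        **Example** (illustrative sketch only; the bucket, key and local path are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                local_file = hook.download_file(
                    key="reports/report.csv",
                    bucket_name="example-bucket",
                    local_path="/tmp",
                    preserve_file_name=True,
                    use_autogenerated_subdir=False,
                )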
"""
self.log.info(
"This function shadows the 'download_file' method of S3 API, but it is not the same. If you "
"want to use the original method from S3 API, please call "
"'S3Hook.get_conn().download_file()'"
)
self.log.info("Downloading source S3 file from Bucket %s with path %s", bucket_name, key)
try:
s3_obj = self.get_key(key, bucket_name)
except ClientError as e:
if e.response.get("Error", {}).get("Code") == 404:
raise AirflowException(
f"The source file in Bucket {bucket_name} with path {key} does not exist"
)
else:
raise e
if preserve_file_name:
local_dir = local_path if local_path else gettempdir()
subdir = f"airflow_tmp_dir_{uuid4().hex[0:8]}" if use_autogenerated_subdir else ""
filename_in_s3 = s3_obj.key.rsplit("/", 1)[-1]
file_path = Path(local_dir, subdir, filename_in_s3)
if file_path.is_file():
self.log.error("file '%s' already exists. Failing the task and not overwriting it", file_path)
raise FileExistsError
file_path.parent.mkdir(exist_ok=True, parents=True)
file = open(file_path, "wb")
else:
file = NamedTemporaryFile(dir=local_path, prefix="airflow_tmp_", delete=False) # type: ignore
with file:
s3_obj.download_fileobj(
file,
ExtraArgs=self.extra_args,
Config=self.transfer_config,
)
return file.name
def generate_presigned_url(
self,
client_method: str,
params: dict | None = None,
expires_in: int = 3600,
http_method: str | None = None,
) -> str | None:
"""
Generate a presigned url given a client, its method, and arguments.
.. seealso::
- :external+boto3:py:meth:`S3.Client.generate_presigned_url`
:param client_method: The client method to presign for.
:param params: The parameters normally passed to ClientMethod.
:param expires_in: The number of seconds the presigned url is valid for.
By default it expires in an hour (3600 seconds).
:param http_method: The http method to use on the generated url.
By default, the http method is whatever is used in the method's model.
:return: The presigned url.
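        **Example** (illustrative sketch only; the bucket and key are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                url = hook.generate_presigned_url(
                    client_method="get_object",
                    params={"Bucket": "example-bucket", "Key": "reports/report.csv"},
                    expires_in=600,
                )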
"""
s3_client = self.get_conn()
try:
return s3_client.generate_presigned_url(
ClientMethod=client_method, Params=params, ExpiresIn=expires_in, HttpMethod=http_method
)
except ClientError as e:
self.log.error(e.response["Error"]["Message"])
return None
@provide_bucket_name
def get_bucket_tagging(self, bucket_name: str | None = None) -> list[dict[str, str]] | None:
"""
Gets a List of tags from a bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.get_bucket_tagging`
:param bucket_name: The name of the bucket.
:return: A List containing the key/value pairs for the tags
"""
try:
s3_client = self.get_conn()
result = s3_client.get_bucket_tagging(Bucket=bucket_name)["TagSet"]
self.log.info("S3 Bucket Tag Info: %s", result)
return result
except ClientError as e:
self.log.error(e)
raise e
@provide_bucket_name
def put_bucket_tagging(
self,
tag_set: dict[str, str] | list[dict[str, str]] | None = None,
key: str | None = None,
value: str | None = None,
bucket_name: str | None = None,
) -> None:
"""
Overwrites the existing TagSet with provided tags; must provide a TagSet, a key/value pair, or both.
.. seealso::
- :external+boto3:py:meth:`S3.Client.put_bucket_tagging`
:param tag_set: A dictionary containing the key/value pairs for the tags,
or a list already formatted for the API
:param key: The Key for the new TagSet entry.
:param value: The Value for the new TagSet entry.
:param bucket_name: The name of the bucket.
:return: None
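        **Example** (illustrative sketch only; the bucket name and tags are placeholders):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.s3 import S3Hook
                hook = S3Hook(aws_conn_id="aws_default")
                hook.put_bucket_tagging(
                    bucket_name="example-bucket",
                    tag_set={"team": "data-eng", "env": "dev"},
                )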
"""
formatted_tags = format_tags(tag_set)
if key and value:
formatted_tags.append({"Key": key, "Value": value})
elif key or value:
message = (
"Key and Value must be specified as a pair. "
f"Only one of the two had a value (key: '{key}', value: '{value}')"
)
self.log.error(message)
raise ValueError(message)
self.log.info("Tagging S3 Bucket %s with %s", bucket_name, formatted_tags)
try:
s3_client = self.get_conn()
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": formatted_tags})
except ClientError as e:
self.log.error(e)
raise e
@provide_bucket_name
def delete_bucket_tagging(self, bucket_name: str | None = None) -> None:
"""
Deletes all tags from a bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.delete_bucket_tagging`
:param bucket_name: The name of the bucket.
:return: None
"""
s3_client = self.get_conn()
s3_client.delete_bucket_tagging(Bucket=bucket_name)
| 58,255 | 37.478203 | 118 | py |
airflow | airflow-main/airflow/providers/amazon/aws/hooks/dms.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from enum import Enum
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class DmsTaskWaiterStatus(str, Enum):
"""Available AWS DMS Task Waiter statuses."""
DELETED = "deleted"
READY = "ready"
RUNNING = "running"
STOPPED = "stopped"
class DmsHook(AwsBaseHook):
"""
Interact with AWS Database Migration Service (DMS).
Provide thin wrapper around
:external+boto3:py:class:`boto3.client("dms") <DatabaseMigrationService.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
kwargs["client_type"] = "dms"
super().__init__(*args, **kwargs)
def describe_replication_tasks(self, **kwargs) -> tuple[str | None, list]:
"""
Describe replication tasks.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_tasks`
:return: Marker and list of replication tasks
"""
dms_client = self.get_conn()
response = dms_client.describe_replication_tasks(**kwargs)
return response.get("Marker"), response.get("ReplicationTasks", [])
def find_replication_tasks_by_arn(self, replication_task_arn: str, without_settings: bool | None = False):
"""
Find and describe replication tasks by task ARN.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_tasks`
:param replication_task_arn: Replication task arn
:param without_settings: Indicates whether to return task information with settings.
:return: list of replication tasks that match the ARN
"""
_, tasks = self.describe_replication_tasks(
Filters=[
{
"Name": "replication-task-arn",
"Values": [replication_task_arn],
}
],
WithoutSettings=without_settings,
)
return tasks
def get_task_status(self, replication_task_arn: str) -> str | None:
"""
Retrieve task status.
:param replication_task_arn: Replication task ARN
:return: Current task status
"""
replication_tasks = self.find_replication_tasks_by_arn(
replication_task_arn=replication_task_arn,
without_settings=True,
)
if len(replication_tasks) == 1:
status = replication_tasks[0]["Status"]
self.log.info('Replication task with ARN(%s) has status "%s".', replication_task_arn, status)
return status
else:
self.log.info("Replication task with ARN(%s) is not found.", replication_task_arn)
return None
def create_replication_task(
self,
replication_task_id: str,
source_endpoint_arn: str,
target_endpoint_arn: str,
replication_instance_arn: str,
migration_type: str,
table_mappings: dict,
**kwargs,
) -> str:
"""
Create DMS replication task.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.create_replication_task`
:param replication_task_id: Replication task id
:param source_endpoint_arn: Source endpoint ARN
:param target_endpoint_arn: Target endpoint ARN
:param replication_instance_arn: Replication instance ARN
:param table_mappings: Table mappings
:param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default.
:return: Replication task ARN
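        **Example** (illustrative sketch only; all ARNs are placeholders and the table mapping
            selects every table in a hypothetical ``test`` schema):
            .. code-block:: python
                from airflow.providers.amazon.aws.hooks.dms import DmsHook
                hook = DmsHook()
                task_arn = hook.create_replication_task(
                    replication_task_id="example-task",
                    source_endpoint_arn="arn:aws:dms:us-east-1:123456789012:endpoint:SOURCE",
                    target_endpoint_arn="arn:aws:dms:us-east-1:123456789012:endpoint:TARGET",
                    replication_instance_arn="arn:aws:dms:us-east-1:123456789012:rep:INSTANCE",
                    migration_type="full-load",
                    table_mappings={
                        "rules": [
                            {
                                "rule-type": "selection",
                                "rule-id": "1",
                                "rule-name": "include-test-schema",
                                "object-locator": {"schema-name": "test", "table-name": "%"},
                                "rule-action": "include",
                            }
                        ]
                    },
                )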
"""
dms_client = self.get_conn()
create_task_response = dms_client.create_replication_task(
ReplicationTaskIdentifier=replication_task_id,
SourceEndpointArn=source_endpoint_arn,
TargetEndpointArn=target_endpoint_arn,
ReplicationInstanceArn=replication_instance_arn,
MigrationType=migration_type,
TableMappings=json.dumps(table_mappings),
**kwargs,
)
replication_task_arn = create_task_response["ReplicationTask"]["ReplicationTaskArn"]
self.wait_for_task_status(replication_task_arn, DmsTaskWaiterStatus.READY)
return replication_task_arn
def start_replication_task(
self,
replication_task_arn: str,
start_replication_task_type: str,
**kwargs,
):
"""
Starts replication task.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.start_replication_task`
:param replication_task_arn: Replication task ARN
:param start_replication_task_type: Replication task start type (default='start-replication')
('start-replication'|'resume-processing'|'reload-target')
"""
dms_client = self.get_conn()
dms_client.start_replication_task(
ReplicationTaskArn=replication_task_arn,
StartReplicationTaskType=start_replication_task_type,
**kwargs,
)
def stop_replication_task(self, replication_task_arn):
"""
Stops replication task.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.stop_replication_task`
:param replication_task_arn: Replication task ARN
"""
dms_client = self.get_conn()
dms_client.stop_replication_task(ReplicationTaskArn=replication_task_arn)
def delete_replication_task(self, replication_task_arn):
"""
Starts replication task deletion and waits for it to be deleted.
.. seealso::
- :external+boto3:py:meth:`DatabaseMigrationService.Client.delete_replication_task`
:param replication_task_arn: Replication task ARN
"""
dms_client = self.get_conn()
dms_client.delete_replication_task(ReplicationTaskArn=replication_task_arn)
self.wait_for_task_status(replication_task_arn, DmsTaskWaiterStatus.DELETED)
def wait_for_task_status(self, replication_task_arn: str, status: DmsTaskWaiterStatus):
"""
Waits for replication task to reach status; supported statuses: deleted, ready, running, stopped.
:param status: Status to wait for
:param replication_task_arn: Replication task ARN
"""
if not isinstance(status, DmsTaskWaiterStatus):
raise TypeError("Status must be an instance of DmsTaskWaiterStatus")
dms_client = self.get_conn()
waiter = dms_client.get_waiter(f"replication_task_{status.value}")
waiter.wait(
Filters=[
{
"Name": "replication-task-arn",
"Values": [
replication_task_arn,
],
},
],
WithoutSettings=True,
)
| 7,907 | 34.621622 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/waiters/base_waiter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import boto3
from botocore.waiter import Waiter, WaiterModel, create_waiter_with_client
class BaseBotoWaiter:
"""
Used to create custom Boto3 Waiters.
For more details, see airflow/providers/amazon/aws/waiters/README.md
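    A minimal sketch of a custom waiter definition (the operation name and response paths below are
    hypothetical; the real configs live in the JSON files shipped next to this package):
    .. code-block:: python
        import boto3
        model_config = {
            "version": 2,
            "waiters": {
                "thing_ready": {
                    "operation": "DescribeThing",
                    "delay": 30,
                    "maxAttempts": 20,
                    "acceptors": [
                        {"matcher": "path", "argument": "Status", "expected": "READY", "state": "success"},
                        {"matcher": "path", "argument": "Status", "expected": "FAILED", "state": "failure"},
                    ],
                }
            },
        }
        waiter = BaseBotoWaiter(client=boto3.client("ec2"), model_config=model_config).waiter("thing_ready")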
"""
def __init__(self, client: boto3.client, model_config: dict, deferrable: bool = False) -> None:
self.model = WaiterModel(model_config)
self.client = client
self.deferrable = deferrable
def _get_async_waiter_with_client(self, waiter_name: str):
from aiobotocore.waiter import create_waiter_with_client as create_async_waiter_with_client
return create_async_waiter_with_client(
waiter_name=waiter_name, waiter_model=self.model, client=self.client
)
def waiter(self, waiter_name: str) -> Waiter:
if self.deferrable:
return self._get_async_waiter_with_client(waiter_name=waiter_name)
return create_waiter_with_client(waiter_name=waiter_name, waiter_model=self.model, client=self.client)
| 1,853 | 37.625 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/waiters/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/connection_wrapper.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import warnings
from copy import deepcopy
from dataclasses import MISSING, InitVar, dataclass, field, fields
from functools import cached_property
from typing import TYPE_CHECKING, Any
from botocore import UNSIGNED
from botocore.config import Config
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
from airflow.models.connection import Connection # Avoid circular imports.
@dataclass
class _ConnectionMetadata:
"""Connection metadata data-class.
    This class implements the main :ref:`~airflow.models.connection.Connection` attributes
    and is used in AwsConnectionWrapper to avoid circular imports.
    It is intended only for internal usage; this class might change or be removed in the future.
"""
conn_id: str | None = None
conn_type: str | None = None
description: str | None = None
host: str | None = None
login: str | None = None
password: str | None = None
schema: str | None = None
port: int | None = None
extra: str | dict | None = None
@property
def extra_dejson(self):
if not self.extra:
return {}
extra = deepcopy(self.extra)
if isinstance(extra, str):
try:
extra = json.loads(extra)
except json.JSONDecodeError as err:
raise AirflowException(
f"'extra' expected valid JSON-Object string. Original error:\n * {err}"
) from None
if not isinstance(extra, dict):
raise TypeError(f"Expected JSON-Object or dict, got {type(extra).__name__}.")
return extra
@dataclass
class AwsConnectionWrapper(LoggingMixin):
"""AWS Connection Wrapper class helper.
    Used to validate and resolve AWS Connection parameters.
    ``conn`` references an Airflow Connection object or an AwsConnectionWrapper;
    if it is set to ``None``, the default values are used.
    The precedence rules for ``region_name``:
    1. Explicitly set (in Hook) ``region_name``.
    2. Airflow Connection Extra 'region_name'.
    The precedence rules for ``botocore_config``:
    1. Explicitly set (in Hook) ``botocore_config``.
    2. Constructed from Airflow Connection Extra 'config_kwargs'.
    3. The wrapper's default value.
"""
conn: InitVar[Connection | AwsConnectionWrapper | _ConnectionMetadata | None]
region_name: str | None = field(default=None)
# boto3 client/resource configs
botocore_config: Config | None = field(default=None)
verify: bool | str | None = field(default=None)
# Reference to Airflow Connection attributes
# ``extra_config`` contains original Airflow Connection Extra.
conn_id: str | ArgNotSet | None = field(init=False, default=NOTSET)
conn_type: str | None = field(init=False, default=None)
login: str | None = field(init=False, repr=False, default=None)
password: str | None = field(init=False, repr=False, default=None)
schema: str | None = field(init=False, repr=False, default=None)
extra_config: dict[str, Any] = field(init=False, repr=False, default_factory=dict)
# AWS Credentials from connection.
aws_access_key_id: str | None = field(init=False, default=None)
aws_secret_access_key: str | None = field(init=False, default=None)
aws_session_token: str | None = field(init=False, default=None)
# AWS Shared Credential profile_name
profile_name: str | None = field(init=False, default=None)
# Custom endpoint_url for boto3.client and boto3.resource
endpoint_url: str | None = field(init=False, default=None)
# Assume Role Configurations
role_arn: str | None = field(init=False, default=None)
assume_role_method: str | None = field(init=False, default=None)
assume_role_kwargs: dict[str, Any] = field(init=False, default_factory=dict)
@cached_property
def conn_repr(self):
return f"AWS Connection (conn_id={self.conn_id!r}, conn_type={self.conn_type!r})"
def get_service_config(self, service_name):
return self.extra_dejson.get("service_config", {}).get(service_name, {})
def __post_init__(self, conn: Connection):
if isinstance(conn, type(self)):
# For every field with init=False we copy reference value from original wrapper
# For every field with init=True we use init values if it not equal default
            # We can't use ``dataclasses.replace`` in a classmethod because
            # we are limited by InitVar arguments (they are not stored on the object),
            # and we also do not want to run __post_init__ again, which would print all logs/warnings again.
for fl in fields(conn):
value = getattr(conn, fl.name)
if not fl.init:
setattr(self, fl.name, value)
else:
if fl.default is not MISSING:
default = fl.default
elif fl.default_factory is not MISSING:
default = fl.default_factory() # zero-argument callable
else:
continue # Value mandatory, skip
orig_value = getattr(self, fl.name)
if orig_value == default:
# Only replace value if it not equal default value
setattr(self, fl.name, value)
return
elif not conn:
return
# Assign attributes from AWS Connection
self.conn_id = conn.conn_id
self.conn_type = conn.conn_type or "aws"
self.login = conn.login
self.password = conn.password
self.schema = conn.schema or None
self.extra_config = deepcopy(conn.extra_dejson)
if self.conn_type.lower() == "s3":
warnings.warn(
f"{self.conn_repr} has connection type 's3', "
"which has been replaced by connection type 'aws'. "
"Please update your connection to have `conn_type='aws'`.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
elif self.conn_type != "aws":
warnings.warn(
f"{self.conn_repr} expected connection type 'aws', got {self.conn_type!r}. "
"This connection might not work correctly. "
"Please use Amazon Web Services Connection type.",
UserWarning,
stacklevel=2,
)
extra = deepcopy(conn.extra_dejson)
session_kwargs = extra.get("session_kwargs", {})
if session_kwargs:
warnings.warn(
"'session_kwargs' in extra config is deprecated and will be removed in a future releases. "
f"Please specify arguments passed to boto3 Session directly in {self.conn_repr} extra.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
# Retrieve initial connection credentials
init_credentials = self._get_credentials(**extra)
self.aws_access_key_id, self.aws_secret_access_key, self.aws_session_token = init_credentials
if not self.region_name:
if "region_name" in extra:
self.region_name = extra["region_name"]
self.log.debug("Retrieving region_name=%s from %s extra.", self.region_name, self.conn_repr)
elif "region_name" in session_kwargs:
self.region_name = session_kwargs["region_name"]
self.log.debug(
"Retrieving region_name=%s from %s extra['session_kwargs'].",
self.region_name,
self.conn_repr,
)
if self.verify is None and "verify" in extra:
self.verify = extra["verify"]
self.log.debug("Retrieving verify=%s from %s extra.", self.verify, self.conn_repr)
if "profile_name" in extra:
self.profile_name = extra["profile_name"]
self.log.debug("Retrieving profile_name=%s from %s extra.", self.profile_name, self.conn_repr)
elif "profile_name" in session_kwargs:
self.profile_name = session_kwargs["profile_name"]
self.log.debug(
"Retrieving profile_name=%s from %s extra['session_kwargs'].",
self.profile_name,
self.conn_repr,
)
# Warn the user that an invalid parameter is being used which actually not related to 'profile_name'.
# ToDo: Remove this check entirely as soon as drop support credentials from s3_config_file
if "profile" in extra and "s3_config_file" not in extra and not self.profile_name:
warnings.warn(
f"Found 'profile' without specifying 's3_config_file' in {self.conn_repr} extra. "
"If required profile from AWS Shared Credentials please "
f"set 'profile_name' in {self.conn_repr} extra.",
UserWarning,
stacklevel=2,
)
config_kwargs = extra.get("config_kwargs")
if not self.botocore_config and config_kwargs:
# https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
self.log.debug("Retrieving botocore config=%s from %s extra.", config_kwargs, self.conn_repr)
if config_kwargs.get("signature_version") == "unsigned":
config_kwargs["signature_version"] = UNSIGNED
self.botocore_config = Config(**config_kwargs)
if conn.host:
warnings.warn(
f"Host {conn.host} specified in the connection is not used."
" Please, set it on extra['endpoint_url'] instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.endpoint_url = extra.get("host")
if self.endpoint_url:
warnings.warn(
"extra['host'] is deprecated and will be removed in a future release."
" Please set extra['endpoint_url'] instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
else:
self.endpoint_url = extra.get("endpoint_url")
# Retrieve Assume Role Configuration
assume_role_configs = self._get_assume_role_configs(**extra)
self.role_arn, self.assume_role_method, self.assume_role_kwargs = assume_role_configs
@classmethod
def from_connection_metadata(
cls,
conn_id: str | None = None,
login: str | None = None,
password: str | None = None,
extra: dict[str, Any] | None = None,
):
"""
Create config from connection metadata.
:param conn_id: Custom connection ID.
:param login: AWS Access Key ID.
:param password: AWS Secret Access Key.
:param extra: Connection Extra metadata.
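        **Example** (illustrative sketch only; the connection id, region and role ARN are placeholders):
            .. code-block:: python
                wrapper = AwsConnectionWrapper.from_connection_metadata(
                    conn_id="aws_example",
                    extra={"region_name": "eu-west-1", "role_arn": "arn:aws:iam::123456789012:role/example"},
                )
                session_kwargs = wrapper.session_kwargs  # e.g. {"region_name": "eu-west-1"}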
"""
conn_meta = _ConnectionMetadata(
conn_id=conn_id, conn_type="aws", login=login, password=password, extra=extra
)
return cls(conn=conn_meta)
@property
def extra_dejson(self):
"""Compatibility with `airflow.models.Connection.extra_dejson` property."""
return self.extra_config
@property
def session_kwargs(self) -> dict[str, Any]:
"""Additional kwargs passed to boto3.session.Session."""
return trim_none_values(
{
"aws_access_key_id": self.aws_access_key_id,
"aws_secret_access_key": self.aws_secret_access_key,
"aws_session_token": self.aws_session_token,
"region_name": self.region_name,
"profile_name": self.profile_name,
}
)
def __bool__(self):
return self.conn_id is not NOTSET
def _get_credentials(
self,
*,
aws_access_key_id: str | None = None,
aws_secret_access_key: str | None = None,
aws_session_token: str | None = None,
# Deprecated Values
s3_config_file: str | None = None,
s3_config_format: str | None = None,
profile: str | None = None,
session_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> tuple[str | None, str | None, str | None]:
"""Get AWS credentials from connection login/password and extra.
``aws_access_key_id`` and ``aws_secret_access_key`` order:
1. From Connection login and password
        2. From Connection ``extra['aws_access_key_id']`` and
           ``extra['aws_secret_access_key']``
        3. (deprecated) From Connection ``extra['session_kwargs']``
        4. (deprecated) From a local credentials file
        Get ``aws_session_token`` from ``extra['aws_session_token']``.
"""
session_kwargs = session_kwargs or {}
session_aws_access_key_id = session_kwargs.get("aws_access_key_id")
session_aws_secret_access_key = session_kwargs.get("aws_secret_access_key")
session_aws_session_token = session_kwargs.get("aws_session_token")
if self.login and self.password:
self.log.info("%s credentials retrieved from login and password.", self.conn_repr)
aws_access_key_id, aws_secret_access_key = self.login, self.password
elif aws_access_key_id and aws_secret_access_key:
self.log.info("%s credentials retrieved from extra.", self.conn_repr)
elif session_aws_access_key_id and session_aws_secret_access_key:
aws_access_key_id = session_aws_access_key_id
aws_secret_access_key = session_aws_secret_access_key
self.log.info("%s credentials retrieved from extra['session_kwargs'].", self.conn_repr)
elif s3_config_file:
aws_access_key_id, aws_secret_access_key = _parse_s3_config(
s3_config_file,
s3_config_format,
profile,
)
self.log.info("%s credentials retrieved from extra['s3_config_file']", self.conn_repr)
if aws_session_token:
self.log.info(
"%s session token retrieved from extra, please note you are responsible for renewing these.",
self.conn_repr,
)
elif session_aws_session_token:
aws_session_token = session_aws_session_token
self.log.info(
"%s session token retrieved from extra['session_kwargs'], "
"please note you are responsible for renewing these.",
self.conn_repr,
)
return aws_access_key_id, aws_secret_access_key, aws_session_token
def _get_assume_role_configs(
self,
*,
role_arn: str | None = None,
assume_role_method: str = "assume_role",
assume_role_kwargs: dict[str, Any] | None = None,
# Deprecated Values
aws_account_id: str | None = None,
aws_iam_role: str | None = None,
external_id: str | None = None,
**kwargs,
) -> tuple[str | None, str | None, dict[Any, str]]:
"""Get assume role configs from Connection extra."""
if role_arn:
self.log.debug("Retrieving role_arn=%r from %s extra.", role_arn, self.conn_repr)
elif aws_account_id and aws_iam_role:
warnings.warn(
"Constructing 'role_arn' from extra['aws_account_id'] and extra['aws_iam_role'] is deprecated"
f" and will be removed in a future releases."
f" Please set 'role_arn' in {self.conn_repr} extra.",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
role_arn = f"arn:aws:iam::{aws_account_id}:role/{aws_iam_role}"
self.log.debug(
"Constructions role_arn=%r from %s extra['aws_account_id'] and extra['aws_iam_role'].",
role_arn,
self.conn_repr,
)
if not role_arn:
            # There is no reason to obtain `assume_role_method` and `assume_role_kwargs` if `role_arn` is not set.
return None, None, {}
supported_methods = ["assume_role", "assume_role_with_saml", "assume_role_with_web_identity"]
if assume_role_method not in supported_methods:
raise NotImplementedError(
f"Found assume_role_method={assume_role_method!r} in {self.conn_repr} extra."
f" Currently {supported_methods} are supported."
                ' (Omitting this setting will default to "assume_role").'
)
self.log.debug("Retrieve assume_role_method=%r from %s.", assume_role_method, self.conn_repr)
assume_role_kwargs = assume_role_kwargs or {}
if "ExternalId" not in assume_role_kwargs and external_id:
warnings.warn(
"'external_id' in extra config is deprecated and will be removed in a future releases. "
f"Please set 'ExternalId' in 'assume_role_kwargs' in {self.conn_repr} extra.",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
assume_role_kwargs["ExternalId"] = external_id
return role_arn, assume_role_method, assume_role_kwargs
def _parse_s3_config(
config_file_name: str, config_format: str | None = "boto", profile: str | None = None
) -> tuple[str | None, str | None]:
"""Parse a config file for S3 credentials.
Can currently parse boto, s3cmd.conf and AWS SDK config formats.
:param config_file_name: path to the config file
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:param profile: profile name in AWS type config file
"""
warnings.warn(
"Use local credentials file is never documented and well tested. "
"Obtain credentials by this way deprecated and will be removed in a future releases.",
AirflowProviderDeprecationWarning,
stacklevel=4,
)
import configparser
config = configparser.ConfigParser()
try:
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException(f"Couldn't read {config_file_name}")
except Exception as e:
raise AirflowException("Exception when parsing %s: %s", config_file_name, e.__class__.__name__)
# Setting option names depending on file format
if config_format is None:
config_format = "boto"
conf_format = config_format.lower()
if conf_format == "boto": # pragma: no cover
if profile is not None and "profile " + profile in sections:
cred_section = "profile " + profile
else:
cred_section = "Credentials"
elif conf_format == "aws" and profile is not None:
cred_section = profile
else:
cred_section = "default"
# Option names
if conf_format in ("boto", "aws"): # pragma: no cover
key_id_option = "aws_access_key_id"
secret_key_option = "aws_secret_access_key"
else:
key_id_option = "access_key"
secret_key_option = "secret_key"
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
mask_secret(secret_key)
except Exception:
raise AirflowException("Option Error in parsing s3 config file")
return access_key, secret_key
| 20,649 | 41.402464 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/sagemaker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
class ApprovalStatus(Enum):
"""Approval statuses for a Sagemaker Model Package."""
APPROVED = "Approved"
REJECTED = "Rejected"
PENDING_MANUAL_APPROVAL = "PendingManualApproval"
| 1,040 | 34.896552 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/emailer.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Airflow module for email backend using AWS SES."""
from __future__ import annotations
from typing import Any
from airflow.providers.amazon.aws.hooks.ses import SesHook
def send_email(
to: list[str] | str,
subject: str,
html_content: str,
files: list | None = None,
cc: list[str] | str | None = None,
bcc: list[str] | str | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str = "aws_default",
from_email: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""Email backend for SES."""
if from_email is None:
raise RuntimeError("The `from_email' configuration has to be set for the SES emailer.")
hook = SesHook(aws_conn_id=conn_id)
hook.send_email(
mail_from=from_email,
to=to,
subject=subject,
html_content=html_content,
files=files,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
custom_headers=custom_headers,
)
| 1,854 | 32.125 | 95 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/waiter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import time
from typing import Callable
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
def waiter(
get_state_callable: Callable,
get_state_args: dict,
parse_response: list,
desired_state: set,
failure_states: set,
object_type: str,
action: str,
countdown: int | float | None = 25 * 60,
check_interval_seconds: int = 60,
) -> None:
"""
Call get_state_callable until it reaches the desired_state or the failure_states.
PLEASE NOTE: While not yet deprecated, we are moving away from this method
and encourage using the custom boto waiters as explained in
https://github.com/apache/airflow/tree/main/airflow/providers/amazon/aws/waiters
    :param get_state_callable: A callable to run until its parsed state reaches the desired_state or a failure state
:param get_state_args: Arguments to pass to get_state_callable
:param parse_response: Dictionary keys to extract state from response of get_state_callable
:param desired_state: Wait until the getter returns this value
:param failure_states: A set of states which indicate failure and should throw an
exception if any are reached before the desired_state
:param object_type: Used for the reporting string. What are you waiting for? (application, job, etc)
:param action: Used for the reporting string. What action are you waiting for? (created, deleted, etc)
:param countdown: Number of seconds the waiter should wait for the desired state before timing out.
Defaults to 25 * 60 seconds. None = infinite.
:param check_interval_seconds: Number of seconds waiter should wait before attempting
to retry get_state_callable. Defaults to 60 seconds.
"""
while True:
state = get_state(get_state_callable(**get_state_args), parse_response)
if state in desired_state:
break
if state in failure_states:
raise AirflowException(f"{object_type.title()} reached failure state {state}.")
if countdown is None:
countdown = float("inf")
if countdown > check_interval_seconds:
countdown -= check_interval_seconds
log.info("Waiting for %s to be %s.", object_type.lower(), action.lower())
time.sleep(check_interval_seconds)
else:
message = f"{object_type.title()} still not {action.lower()} after the allocated time limit."
log.error(message)
raise RuntimeError(message)
def get_state(response, keys) -> str:
value = response
for key in keys:
if value is not None:
value = value.get(key, None)
return value
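# --- Usage sketch (illustrative only) ---
# A minimal, self-contained example of driving ``waiter`` with a stubbed
# ``get_state_callable``. Real callers would pass a boto3 "describe" call
# (e.g. ``emr_client.describe_cluster``) instead of the fake callable below.
def _example_waiter_usage() -> None:
    def fake_describe_job(job_id: str) -> dict:
        # Stand-in for a boto3 describe call; always reports the desired state here.
        return {"Job": {"Status": {"State": "COMPLETED"}}}
    waiter(
        get_state_callable=fake_describe_job,
        get_state_args={"job_id": "job-123"},
        parse_response=["Job", "Status", "State"],
        desired_state={"COMPLETED"},
        failure_states={"FAILED"},
        object_type="job",
        action="completed",
        countdown=120,
        check_interval_seconds=5,
    )
if __name__ == "__main__":
    _example_waiter_usage()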
| 3,509 | 39.813953 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/tags.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
def format_tags(source: Any, *, key_label: str = "Key", value_label: str = "Value"):
"""
Format tags for boto call which expect a given format.
If given a dictionary, formats it as an array of objects with a key and a value field to be passed to boto
calls that expect this format.
Else, assumes that it's already in the right format and returns it as is. We do not validate
the format here since it's done by boto anyway, and the error would not be clearer if thrown from here.
:param source: a dict from which keys and values are read
:param key_label: optional, the label to use for keys if not "Key"
:param value_label: optional, the label to use for values if not "Value"
"""
if source is None:
return []
elif isinstance(source, dict):
return [{key_label: kvp[0], value_label: kvp[1]} for kvp in source.items()]
else:
return source
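# --- Usage sketch (illustrative only) ---
# format_tags turns a plain dict into the list-of-objects shape most boto3 APIs
# expect, and passes pre-formatted input (or None) through untouched.
if __name__ == "__main__":
    assert format_tags({"env": "dev", "team": "data"}) == [
        {"Key": "env", "Value": "dev"},
        {"Key": "team", "Value": "data"},
    ]
    # Some APIs expect different labels; both labels are configurable.
    assert format_tags({"env": "dev"}, key_label="key", value_label="value") == [
        {"key": "env", "value": "dev"}
    ]
    assert format_tags(None) == []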
| 1,762 | 40.97619 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/eks_get_token.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import argparse
import json
from datetime import datetime, timedelta, timezone
from airflow.providers.amazon.aws.hooks.eks import EksHook
# Presigned STS URLs are valid for 15 minutes; set the token expiration to 1 minute before
# that for some cushion
TOKEN_EXPIRATION_MINUTES = 14
def get_expiration_time():
token_expiration = datetime.now(timezone.utc) + timedelta(minutes=TOKEN_EXPIRATION_MINUTES)
return token_expiration.strftime("%Y-%m-%dT%H:%M:%SZ")
def get_parser():
parser = argparse.ArgumentParser(description="Get a token for authentication with an Amazon EKS cluster.")
parser.add_argument(
"--cluster-name", help="The name of the cluster to generate kubeconfig file for.", required=True
)
parser.add_argument(
"--aws-conn-id",
help=(
"The Airflow connection used for AWS credentials. "
"If not specified or empty then the default boto3 behaviour is used."
),
)
parser.add_argument(
"--region-name", help="AWS region_name. If not specified then the default boto3 behaviour is used."
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
eks_hook = EksHook(aws_conn_id=args.aws_conn_id, region_name=args.region_name)
access_token = eks_hook.fetch_access_token_for_cluster(args.cluster_name)
access_token_expiration = get_expiration_time()
exec_credential_object = {
"kind": "ExecCredential",
"apiVersion": "client.authentication.k8s.io/v1alpha1",
"spec": {},
"status": {"expirationTimestamp": access_token_expiration, "token": access_token},
}
print(json.dumps(exec_credential_object))
if __name__ == "__main__":
main()
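# --- Usage sketch (illustrative only) ---
# This module is intended to be run as a script; the printed ExecCredential JSON
# can back a kubeconfig "exec" credential plugin. The cluster name, connection id
# and region below are placeholder assumptions.
#
#   python -m airflow.providers.amazon.aws.utils.eks_get_token \
#       --cluster-name my-eks-cluster \
#       --aws-conn-id aws_default \
#       --region-name us-east-1
#
# A kubeconfig user entry could then look roughly like:
#
#   users:
#   - name: airflow-user
#     user:
#       exec:
#         apiVersion: client.authentication.k8s.io/v1alpha1
#         command: python
#         args: ["-m", "airflow.providers.amazon.aws.utils.eks_get_token",
#                "--cluster-name", "my-eks-cluster"]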
| 2,561 | 34.583333 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/rds.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
class RdsDbType(Enum):
"""Only available types for the RDS."""
INSTANCE: str = "instance"
CLUSTER: str = "cluster"
| 973 | 35.074074 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import re
from datetime import datetime
from enum import Enum
from airflow.version import version
log = logging.getLogger(__name__)
def trim_none_values(obj: dict):
return {key: val for key, val in obj.items() if val is not None}
def datetime_to_epoch(date_time: datetime) -> int:
"""Convert a datetime object to an epoch integer (seconds)."""
return int(date_time.timestamp())
def datetime_to_epoch_ms(date_time: datetime) -> int:
"""Convert a datetime object to an epoch integer (milliseconds)."""
return int(date_time.timestamp() * 1_000)
def datetime_to_epoch_us(date_time: datetime) -> int:
"""Convert a datetime object to an epoch integer (microseconds)."""
return int(date_time.timestamp() * 1_000_000)
def get_airflow_version() -> tuple[int, ...]:
val = re.sub(r"(\d+\.\d+\.\d+).*", lambda x: x.group(1), version)
return tuple(int(x) for x in val.split("."))
class _StringCompareEnum(Enum):
"""
An Enum class which can be compared with regular `str` and subclasses.
This class avoids multiple inheritance such as AwesomeEnum(str, Enum)
which does not work well with templated_fields and Jinja templates.
"""
def __eq__(self, other):
if isinstance(other, str):
return self.value == other
return super().__eq__(other)
def __hash__(self):
return super().__hash__() # Need to set because we redefine __eq__
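# --- Usage sketch (illustrative only) ---
# A small demonstration of the helpers above; the enum below is hypothetical and
# not part of the provider.
if __name__ == "__main__":
    from datetime import timezone
    class _ExampleState(_StringCompareEnum):
        RUNNING = "RUNNING"
    # _StringCompareEnum members compare equal to plain strings, which keeps
    # templated fields and Jinja-rendered values easy to compare against.
    assert _ExampleState.RUNNING == "RUNNING"
    epoch_plus_one = datetime(1970, 1, 1, 0, 0, 1, tzinfo=timezone.utc)
    assert datetime_to_epoch(epoch_plus_one) == 1
    assert datetime_to_epoch_ms(epoch_plus_one) == 1_000
    assert datetime_to_epoch_us(epoch_plus_one) == 1_000_000
    assert trim_none_values({"a": 1, "b": None}) == {"a": 1}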
| 2,265 | 32.323529 | 75 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/waiter_with_logging.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import logging
import time
from typing import Any
import jmespath
from botocore.exceptions import WaiterError
from botocore.waiter import Waiter
from airflow.exceptions import AirflowException
def wait(
waiter: Waiter,
waiter_delay: int,
waiter_max_attempts: int,
args: dict[str, Any],
failure_message: str,
status_message: str,
status_args: list[str],
) -> None:
"""
Use a boto waiter to poll an AWS service for the specified state.
Although this function uses boto waiters to poll the state of the
service, it logs the response of the service after every attempt,
which is not currently supported by boto waiters.
:param waiter: The boto waiter to use.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param args: The arguments to pass to the waiter.
:param failure_message: The message to log if a failure state is reached.
:param status_message: The message logged when printing the status of the service.
:param status_args: A list containing the JMESPath queries to retrieve status information from
the waiter response.
e.g.
response = {"Cluster": {"state": "CREATING"}}
status_args = ["Cluster.state"]
response = {
"Clusters": [{"state": "CREATING", "details": "User initiated."},]
}
status_args = ["Clusters[0].state", "Clusters[0].details"]
"""
log = logging.getLogger(__name__)
attempt = 0
while True:
attempt += 1
try:
waiter.wait(**args, WaiterConfig={"MaxAttempts": 1})
break
except WaiterError as error:
if "terminal failure" in str(error):
log.error("%s: %s", failure_message, _LazyStatusFormatter(status_args, error.last_response))
raise AirflowException(f"{failure_message}: {error}")
log.info("%s: %s", status_message, _LazyStatusFormatter(status_args, error.last_response))
if attempt >= waiter_max_attempts:
raise AirflowException("Waiter error: max attempts reached")
time.sleep(waiter_delay)
async def async_wait(
waiter: Waiter,
waiter_delay: int,
waiter_max_attempts: int,
args: dict[str, Any],
failure_message: str,
status_message: str,
status_args: list[str],
):
"""
Use an async boto waiter to poll an AWS service for the specified state.
Although this function uses boto waiters to poll the state of the
service, it logs the response of the service after every attempt,
which is not currently supported by boto waiters.
:param waiter: The boto waiter to use.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param args: The arguments to pass to the waiter.
:param failure_message: The message to log if a failure state is reached.
:param status_message: The message logged when printing the status of the service.
:param status_args: A list containing the JMESPath queries to retrieve status information from
the waiter response.
e.g.
response = {"Cluster": {"state": "CREATING"}}
status_args = ["Cluster.state"]
response = {
"Clusters": [{"state": "CREATING", "details": "User initiated."},]
}
status_args = ["Clusters[0].state", "Clusters[0].details"]
"""
log = logging.getLogger(__name__)
attempt = 0
while True:
attempt += 1
try:
await waiter.wait(**args, WaiterConfig={"MaxAttempts": 1})
break
except WaiterError as error:
if "terminal failure" in str(error):
log.error("%s: %s", failure_message, _LazyStatusFormatter(status_args, error.last_response))
raise AirflowException(f"{failure_message}: {error}")
log.info("%s: %s", status_message, _LazyStatusFormatter(status_args, error.last_response))
if attempt >= waiter_max_attempts:
raise AirflowException("Waiter error: max attempts reached")
await asyncio.sleep(waiter_delay)
class _LazyStatusFormatter:
"""
Contains the info necessary to extract the status from a response; only computes the value when necessary.
Used to avoid computations if the logs are disabled at the given level.
"""
def __init__(self, jmespath_queries: list[str], response: dict[str, Any]):
self.jmespath_queries = jmespath_queries
self.response = response
def __str__(self):
"""Loop through the args list and generate a string containing values from the waiter response."""
values = []
for query in self.jmespath_queries:
value = jmespath.search(query, self.response)
if value is not None and value != "":
values.append(str(value))
return " - ".join(values)
| 5,890 | 36.762821 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/task_log_fetcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from datetime import datetime, timedelta
from logging import Logger
from threading import Event, Thread
from typing import Generator
from botocore.exceptions import ClientError, ConnectionClosedError
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
class AwsTaskLogFetcher(Thread):
"""Fetch Cloudwatch log events with specific interval and send the log events to the logger.info."""
def __init__(
self,
*,
log_group: str,
log_stream_name: str,
fetch_interval: timedelta,
logger: Logger,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
):
super().__init__()
self._event = Event()
self.fetch_interval = fetch_interval
self.logger = logger
self.log_group = log_group
self.log_stream_name = log_stream_name
self.hook = AwsLogsHook(aws_conn_id=aws_conn_id, region_name=region_name)
def run(self) -> None:
continuation_token = AwsLogsHook.ContinuationToken()
while not self.is_stopped():
time.sleep(self.fetch_interval.total_seconds())
log_events = self._get_log_events(continuation_token)
for log_event in log_events:
self.logger.info(self.event_to_str(log_event))
def _get_log_events(self, skip_token: AwsLogsHook.ContinuationToken | None = None) -> Generator:
if skip_token is None:
skip_token = AwsLogsHook.ContinuationToken()
try:
yield from self.hook.get_log_events(
self.log_group, self.log_stream_name, continuation_token=skip_token
)
except ClientError as error:
if error.response["Error"]["Code"] != "ResourceNotFoundException":
self.logger.warning("Error on retrieving Cloudwatch log events", error)
else:
self.logger.info(
"Cannot find log stream yet, it can take a couple of seconds to show up. "
"If this error persists, check that the log group and stream are correct: "
"group: %s\tstream: %s",
self.log_group,
self.log_stream_name,
)
yield from ()
except ConnectionClosedError as error:
self.logger.warning("ConnectionClosedError on retrieving Cloudwatch log events", error)
yield from ()
@staticmethod
def event_to_str(event: dict) -> str:
event_dt = datetime.utcfromtimestamp(event["timestamp"] / 1000.0)
formatted_event_dt = event_dt.strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
message = event["message"]
return f"[{formatted_event_dt}] {message}"
def get_last_log_messages(self, number_messages) -> list:
"""
        Get the last log messages in a single request.
NOTE: some restrictions apply:
- if logs are too old, the response will be empty
- the max number of messages we can retrieve is constrained by cloudwatch limits (10,000).
"""
response = self.hook.conn.get_log_events(
logGroupName=self.log_group,
logStreamName=self.log_stream_name,
startFromHead=False,
limit=number_messages,
)
return [log["message"] for log in response["events"]]
def get_last_log_message(self) -> str | None:
try:
return self.get_last_log_messages(1)[0]
except IndexError:
return None
def is_stopped(self) -> bool:
return self._event.is_set()
def stop(self):
self._event.set()
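# --- Usage sketch (illustrative only) ---
# A hypothetical example of how an operator can stream CloudWatch logs while it
# waits for a remote job; the log group and stream names are placeholders.
def _example_stream_logs(logger: Logger) -> None:
    fetcher = AwsTaskLogFetcher(
        log_group="/aws/ecs/example-task",
        log_stream_name="example-stream",
        fetch_interval=timedelta(seconds=30),
        logger=logger,
        aws_conn_id="aws_default",
    )
    fetcher.start()
    try:
        pass  # ... wait for the remote job to finish here ...
    finally:
        fetcher.stop()
        fetcher.join()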
| 4,491 | 36.433333 | 104 | py |
airflow | airflow-main/airflow/providers/amazon/aws/utils/redshift.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from botocore.credentials import ReadOnlyCredentials
log = logging.getLogger(__name__)
def build_credentials_block(credentials: ReadOnlyCredentials) -> str:
"""Generate AWS credentials block for Redshift COPY and UNLOAD commands.
See AWS docs for details:
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
:param credentials: ReadOnlyCredentials object from `botocore`
"""
if credentials.token:
log.debug("STS token found in credentials, including it in the command")
# these credentials are obtained from AWS STS
# so the token must be included in the CREDENTIALS clause
credentials_line = (
f"aws_access_key_id={credentials.access_key};"
f"aws_secret_access_key={credentials.secret_key};"
f"token={credentials.token}"
)
else:
credentials_line = (
f"aws_access_key_id={credentials.access_key};aws_secret_access_key={credentials.secret_key}"
)
return credentials_line
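# --- Usage sketch (illustrative only) ---
# A minimal example of embedding the credentials block in a Redshift COPY
# statement, roughly mirroring what the S3-to-Redshift transfer does; the table
# and bucket names are placeholder assumptions.
def _example_copy_statement() -> str:
    from airflow.providers.amazon.aws.hooks.s3 import S3Hook
    credentials = S3Hook(aws_conn_id="aws_default").get_credentials()
    credentials_block = build_credentials_block(credentials)
    return (
        "COPY my_schema.my_table "
        "FROM 's3://my-bucket/data/' "
        f"CREDENTIALS '{credentials_block}' "
        "CSV;"
    )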
| 1,897 | 36.96 | 104 | py |
airflow | airflow-main/airflow/providers/amazon/aws/log/s3_task_handler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import pathlib
import shutil
from functools import cached_property
from packaging.version import Version
from airflow.configuration import conf
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
def get_default_delete_local_copy():
"""Load delete_local_logs conf if Airflow version > 2.6 and return False if not.
TODO: delete this function when min airflow version >= 2.6
"""
from airflow.version import version
if Version(version) < Version("2.6"):
return False
return conf.getboolean("logging", "delete_local_logs")
class S3TaskHandler(FileTaskHandler, LoggingMixin):
"""
S3TaskHandler is a python log handler that handles and reads task instance logs.
It extends airflow FileTaskHandler and uploads to and reads from S3 remote storage.
"""
trigger_should_wrap = True
def __init__(
self, base_log_folder: str, s3_log_folder: str, filename_template: str | None = None, **kwargs
):
super().__init__(base_log_folder, filename_template)
self.remote_base = s3_log_folder
self.log_relative_path = ""
self._hook = None
self.closed = False
self.upload_on_close = True
self.delete_local_copy = (
kwargs["delete_local_copy"] if "delete_local_copy" in kwargs else get_default_delete_local_copy()
)
@cached_property
def hook(self):
"""Returns S3Hook."""
return S3Hook(
aws_conn_id=conf.get("logging", "REMOTE_LOG_CONN_ID"), transfer_config_args={"use_threads": False}
)
def set_context(self, ti):
super().set_context(ti)
# Local location and remote location is needed to open and
# upload local log file to S3 remote storage.
full_path = self.handler.baseFilename
self.log_relative_path = pathlib.Path(full_path).relative_to(self.local_base).as_posix()
is_trigger_log_context = getattr(ti, "is_trigger_log_context", False)
self.upload_on_close = is_trigger_log_context or not ti.raw
# Clear the file first so that duplicate data is not uploaded
# when re-using the same path (e.g. with rescheduled sensors)
if self.upload_on_close:
with open(self.handler.baseFilename, "w"):
pass
def close(self):
"""Close and upload local log file to remote storage S3."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
log = pathlib.Path(local_loc).read_text()
write_to_s3 = self.s3_write(log, remote_loc)
if write_to_s3 and self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True
def _read_remote_logs(self, ti, try_number, metadata=None) -> tuple[list[str], list[str]]:
# Explicitly getting log relative path is necessary as the given
# task instance might be different than task instance passed in
# in set_context method.
worker_log_rel_path = self._render_filename(ti, try_number)
logs = []
messages = []
bucket, prefix = self.hook.parse_s3_url(s3url=os.path.join(self.remote_base, worker_log_rel_path))
keys = self.hook.list_keys(bucket_name=bucket, prefix=prefix)
if keys:
keys = [f"s3://{bucket}/{key}" for key in keys]
messages.extend(["Found logs in s3:", *[f" * {x}" for x in sorted(keys)]])
for key in sorted(keys):
logs.append(self.s3_read(key, return_error=True))
else:
messages.append(f"No logs found on s3 for ti={ti}")
return messages, logs
def _read(self, ti, try_number, metadata=None):
"""
Read logs of given task instance and try_number from S3 remote storage.
If failed, read the log from task instance host machine.
todo: when min airflow version >= 2.6 then remove this method (``_read``)
:param ti: task instance object
:param try_number: task instance try_number to read logs from
:param metadata: log metadata,
can be used for steaming log reading and auto-tailing.
"""
# from airflow 2.6 we no longer implement the _read method
if hasattr(super(), "_read_remote_logs"):
return super()._read(ti, try_number, metadata)
# if we get here, we're on airflow < 2.6 and we use this backcompat logic
messages, logs = self._read_remote_logs(ti, try_number, metadata)
if logs:
return "".join(f"*** {x}\n" for x in messages) + "\n".join(logs), {"end_of_log": True}
else:
if metadata and metadata.get("log_pos", 0) > 0:
log_prefix = ""
else:
log_prefix = "*** Falling back to local log\n"
local_log, metadata = super()._read(ti, try_number, metadata)
return f"{log_prefix}{local_log}", metadata
def s3_log_exists(self, remote_log_location: str) -> bool:
"""
Check if remote_log_location exists in remote storage.
:param remote_log_location: log's location in remote storage
:return: True if location exists else False
"""
return self.hook.check_for_key(remote_log_location)
def s3_read(self, remote_log_location: str, return_error: bool = False) -> str:
"""
        Return the log found at the remote_log_location, or '' if no logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:return: the log found at the remote_log_location
"""
try:
return self.hook.read_key(remote_log_location)
except Exception as error:
msg = f"Could not read logs from {remote_log_location} with error: {error}"
self.log.exception(msg)
# return error if needed
if return_error:
return msg
return ""
def s3_write(self, log: str, remote_log_location: str, append: bool = True, max_retry: int = 1) -> bool:
"""
        Write the log to the remote_log_location; return `True`, or fail silently and return `False`.
:param log: the log to write to the remote_log_location
:param remote_log_location: the log's location in remote storage
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:param max_retry: Maximum number of times to retry on upload failure
:return: whether the log is successfully written to remote location or not.
"""
try:
if append and self.s3_log_exists(remote_log_location):
old_log = self.s3_read(remote_log_location)
log = "\n".join([old_log, log]) if old_log else log
except Exception:
self.log.exception("Could not verify previous log to append")
return False
# Default to a single retry attempt because s3 upload failures are
# rare but occasionally occur. Multiple retry attempts are unlikely
# to help as they usually indicate non-ephemeral errors.
for try_num in range(1 + max_retry):
try:
self.hook.load_string(
log,
key=remote_log_location,
replace=True,
encrypt=conf.getboolean("logging", "ENCRYPT_S3_LOGS"),
)
break
except Exception:
if try_num < max_retry:
self.log.warning("Failed attempt to write logs to %s, will retry", remote_log_location)
else:
self.log.exception("Could not write logs to %s", remote_log_location)
return False
return True
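# --- Configuration sketch (illustrative only) ---
# Remote S3 task logging is normally enabled through Airflow's logging settings
# rather than by instantiating this handler directly. The bucket name and
# connection id below are placeholder assumptions.
#
#   [logging]
#   remote_logging = True
#   remote_base_log_folder = s3://my-airflow-logs/
#   remote_log_conn_id = aws_default
#   encrypt_s3_logs = False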
| 9,600 | 41.295154 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/log/cloudwatch_task_handler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime
from functools import cached_property
import watchtower
from airflow.configuration import conf
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
class CloudwatchTaskHandler(FileTaskHandler, LoggingMixin):
"""
CloudwatchTaskHandler is a python log handler that handles and reads task instance logs.
It extends airflow FileTaskHandler and uploads to and reads from Cloudwatch.
:param base_log_folder: base folder to store logs locally
:param log_group_arn: ARN of the Cloudwatch log group for remote log storage
with format ``arn:aws:logs:{region name}:{account id}:log-group:{group name}``
:param filename_template: template for file name (local storage) or log stream name (remote)
"""
trigger_should_wrap = True
def __init__(self, base_log_folder: str, log_group_arn: str, filename_template: str | None = None):
super().__init__(base_log_folder, filename_template)
split_arn = log_group_arn.split(":")
self.handler = None
self.log_group = split_arn[6]
self.region_name = split_arn[3]
self.closed = False
@cached_property
def hook(self):
"""Returns AwsLogsHook."""
return AwsLogsHook(
aws_conn_id=conf.get("logging", "REMOTE_LOG_CONN_ID"), region_name=self.region_name
)
def _render_filename(self, ti, try_number):
# Replace unsupported log group name characters
return super()._render_filename(ti, try_number).replace(":", "_")
def set_context(self, ti):
super().set_context(ti)
self.handler = watchtower.CloudWatchLogHandler(
log_group_name=self.log_group,
log_stream_name=self._render_filename(ti, ti.try_number),
use_queues=not getattr(ti, "is_trigger_log_context", False),
boto3_client=self.hook.get_conn(),
)
def close(self):
"""Close the handler responsible for the upload of the local log file to Cloudwatch."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
if self.handler is not None:
self.handler.close()
# Mark closed so we don't double write if close is called twice
self.closed = True
def _read(self, task_instance, try_number, metadata=None):
stream_name = self._render_filename(task_instance, try_number)
try:
return (
f"*** Reading remote log from Cloudwatch log_group: {self.log_group} "
f"log_stream: {stream_name}.\n{self.get_cloudwatch_logs(stream_name=stream_name)}\n",
{"end_of_log": True},
)
except Exception as e:
log = (
f"*** Unable to read remote logs from Cloudwatch (log_group: {self.log_group}, log_stream: "
f"{stream_name})\n*** {str(e)}\n\n"
)
self.log.error(log)
local_log, metadata = super()._read(task_instance, try_number, metadata)
log += local_log
return log, metadata
def get_cloudwatch_logs(self, stream_name: str) -> str:
"""
Return all logs from the given log stream.
:param stream_name: name of the Cloudwatch log stream to get all logs from
:return: string of all logs from the given log stream
"""
events = self.hook.get_log_events(
log_group=self.log_group,
log_stream_name=stream_name,
start_from_head=True,
)
return "\n".join(self._event_to_str(event) for event in events)
def _event_to_str(self, event: dict) -> str:
event_dt = datetime.utcfromtimestamp(event["timestamp"] / 1000.0)
formatted_event_dt = event_dt.strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
message = event["message"]
return f"[{formatted_event_dt}] {message}"
| 5,057 | 39.464 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/log/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/emr.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Iterable, Sequence
from deprecated import deprecated
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.emr import EmrContainerHook, EmrHook, EmrServerlessHook
from airflow.providers.amazon.aws.links.emr import EmrClusterLink, EmrLogsLink, get_log_uri
from airflow.providers.amazon.aws.triggers.emr import (
EmrContainerTrigger,
EmrStepSensorTrigger,
EmrTerminateJobFlowTrigger,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class EmrBaseSensor(BaseSensorOperator):
"""
Contains general sensor behavior for EMR.
Subclasses should implement following methods:
- ``get_emr_response()``
- ``state_from_response()``
- ``failure_message_from_response()``
Subclasses should set ``target_states`` and ``failed_states`` fields.
:param aws_conn_id: aws connection to use
"""
ui_color = "#66c3ff"
def __init__(self, *, aws_conn_id: str = "aws_default", **kwargs):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.target_states: Iterable[str] = [] # will be set in subclasses
self.failed_states: Iterable[str] = [] # will be set in subclasses
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> EmrHook:
return self.hook
@cached_property
def hook(self) -> EmrHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
def poke(self, context: Context):
response = self.get_emr_response(context=context)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
self.log.info("Bad HTTP response: %s", response)
return False
state = self.state_from_response(response)
self.log.info("Job flow currently %s", state)
if state in self.target_states:
return True
if state in self.failed_states:
raise AirflowException(f"EMR job failed: {self.failure_message_from_response(response)}")
return False
def get_emr_response(self, context: Context) -> dict[str, Any]:
"""
Make an API call with boto3 and get response.
:return: response
"""
raise NotImplementedError("Please implement get_emr_response() in subclass")
@staticmethod
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from boto3 response.
:param response: response from AWS API
:return: state
"""
raise NotImplementedError("Please implement state_from_response() in subclass")
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get state from boto3 response.
:param response: response from AWS API
:return: failure message
"""
raise NotImplementedError("Please implement failure_message_from_response() in subclass")
class EmrServerlessJobSensor(BaseSensorOperator):
"""
Poll the state of the job run until it reaches a terminal state; fails if the job run fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrServerlessJobSensor`
:param application_id: application_id to check the state of
:param job_run_id: job_run_id to check the state of
:param target_states: a set of states to wait for, defaults to 'SUCCESS'
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
template_fields: Sequence[str] = (
"application_id",
"job_run_id",
)
def __init__(
self,
*,
application_id: str,
job_run_id: str,
target_states: set | frozenset = frozenset(EmrServerlessHook.JOB_SUCCESS_STATES),
aws_conn_id: str = "aws_default",
**kwargs: Any,
) -> None:
self.aws_conn_id = aws_conn_id
self.target_states = target_states
self.application_id = application_id
self.job_run_id = job_run_id
super().__init__(**kwargs)
def poke(self, context: Context) -> bool:
response = self.hook.conn.get_job_run(applicationId=self.application_id, jobRunId=self.job_run_id)
state = response["jobRun"]["state"]
if state in EmrServerlessHook.JOB_FAILURE_STATES:
failure_message = f"EMR Serverless job failed: {self.failure_message_from_response(response)}"
raise AirflowException(failure_message)
return state in self.target_states
@cached_property
def hook(self) -> EmrServerlessHook:
"""Create and return an EmrServerlessHook."""
return EmrServerlessHook(aws_conn_id=self.aws_conn_id)
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
return response["jobRun"]["stateDetails"]
class EmrServerlessApplicationSensor(BaseSensorOperator):
"""
Poll the state of the application until it reaches a terminal state; fails if the application fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrServerlessApplicationSensor`
:param application_id: application_id to check the state of
:param target_states: a set of states to wait for, defaults to {'CREATED', 'STARTED'}
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
template_fields: Sequence[str] = ("application_id",)
def __init__(
self,
*,
application_id: str,
target_states: set | frozenset = frozenset(EmrServerlessHook.APPLICATION_SUCCESS_STATES),
aws_conn_id: str = "aws_default",
**kwargs: Any,
) -> None:
self.aws_conn_id = aws_conn_id
self.target_states = target_states
self.application_id = application_id
super().__init__(**kwargs)
def poke(self, context: Context) -> bool:
response = self.hook.conn.get_application(applicationId=self.application_id)
state = response["application"]["state"]
if state in EmrServerlessHook.APPLICATION_FAILURE_STATES:
failure_message = f"EMR Serverless job failed: {self.failure_message_from_response(response)}"
raise AirflowException(failure_message)
return state in self.target_states
@cached_property
def hook(self) -> EmrServerlessHook:
"""Create and return an EmrServerlessHook."""
return EmrServerlessHook(aws_conn_id=self.aws_conn_id)
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
return response["application"]["stateDetails"]
class EmrContainerSensor(BaseSensorOperator):
"""
Poll the state of the job run until it reaches a terminal state; fail if the job run fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrContainerSensor`
:param job_id: job_id to check the state of
:param max_retries: Number of times to poll for query state before
returning the current state, defaults to None
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
    :param poll_interval: Time in seconds to wait between two consecutive calls to
        check query status on EMR Containers, defaults to 10
:param deferrable: Run sensor in the deferrable mode.
"""
INTERMEDIATE_STATES = (
"PENDING",
"SUBMITTED",
"RUNNING",
)
FAILURE_STATES = (
"FAILED",
"CANCELLED",
"CANCEL_PENDING",
)
SUCCESS_STATES = ("COMPLETED",)
template_fields: Sequence[str] = ("virtual_cluster_id", "job_id")
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
*,
virtual_cluster_id: str,
job_id: str,
max_retries: int | None = None,
aws_conn_id: str = "aws_default",
poll_interval: int = 10,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.virtual_cluster_id = virtual_cluster_id
self.job_id = job_id
self.poll_interval = poll_interval
self.max_retries = max_retries
self.deferrable = deferrable
@cached_property
def hook(self) -> EmrContainerHook:
return EmrContainerHook(self.aws_conn_id, virtual_cluster_id=self.virtual_cluster_id)
def poke(self, context: Context) -> bool:
state = self.hook.poll_query_status(
self.job_id,
max_polling_attempts=self.max_retries,
poll_interval=self.poll_interval,
)
if state in self.FAILURE_STATES:
raise AirflowException("EMR Containers sensor failed")
if state in self.INTERMEDIATE_STATES:
return False
return True
def execute(self, context: Context):
if not self.deferrable:
super().execute(context=context)
else:
timeout = (
timedelta(seconds=self.max_retries * self.poll_interval + 60)
if self.max_retries
else self.execution_timeout
)
self.defer(
timeout=timeout,
trigger=EmrContainerTrigger(
virtual_cluster_id=self.virtual_cluster_id,
job_id=self.job_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.poll_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
else:
self.log.info(event["message"])
class EmrNotebookExecutionSensor(EmrBaseSensor):
"""
Poll the EMR notebook until it reaches any of the target states; raise AirflowException on failure.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrNotebookExecutionSensor`
:param notebook_execution_id: Unique id of the notebook execution to be poked.
    :param target_states: the states the sensor will wait for the execution to reach.
        Default target_states is ``FINISHED``.
    :param failed_states: if the execution reaches any of the failed_states, the sensor will fail.
        Default failed_states is ``FAILED``.
"""
template_fields: Sequence[str] = ("notebook_execution_id",)
FAILURE_STATES = {"FAILED"}
COMPLETED_STATES = {"FINISHED"}
def __init__(
self,
notebook_execution_id: str,
target_states: Iterable[str] | None = None,
failed_states: Iterable[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.notebook_execution_id = notebook_execution_id
self.target_states = target_states or self.COMPLETED_STATES
self.failed_states = failed_states or self.FAILURE_STATES
def get_emr_response(self, context: Context) -> dict[str, Any]:
emr_client = self.hook.conn
self.log.info("Poking notebook %s", self.notebook_execution_id)
return emr_client.describe_notebook_execution(NotebookExecutionId=self.notebook_execution_id)
@staticmethod
def state_from_response(response: dict[str, Any]) -> str:
"""
        Get state from response dictionary.
        :param response: response from AWS API
        :return: current state of the notebook execution
"""
return response["NotebookExecution"]["Status"]
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
cluster_status = response["NotebookExecution"]
return cluster_status.get("LastStateChangeReason", None)
class EmrJobFlowSensor(EmrBaseSensor):
"""
Poll the EMR JobFlow Cluster until it reaches any of the target states; raise AirflowException on failure.
    With the default target states, the sensor waits for the cluster to be terminated.
    When target_states is set to ['RUNNING', 'WAITING'], the sensor waits
    until the job flow is ready (after the 'STARTING' and 'BOOTSTRAPPING' states).
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrJobFlowSensor`
:param job_flow_id: job_flow_id to check the state of
:param target_states: the target states, sensor waits until
job flow reaches any of these states. In deferrable mode it would
        run until it reaches a terminal state.
:param failed_states: the failure states, sensor fails when
job flow reaches any of these states
:param max_attempts: Maximum number of tries before failing
:param deferrable: Run sensor in the deferrable mode.
"""
template_fields: Sequence[str] = ("job_flow_id", "target_states", "failed_states")
template_ext: Sequence[str] = ()
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
job_flow_id: str,
target_states: Iterable[str] | None = None,
failed_states: Iterable[str] | None = None,
max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.job_flow_id = job_flow_id
self.target_states = target_states or ["TERMINATED"]
self.failed_states = failed_states or ["TERMINATED_WITH_ERRORS"]
self.max_attempts = max_attempts
self.deferrable = deferrable
def get_emr_response(self, context: Context) -> dict[str, Any]:
"""
Make an API call with boto3 and get cluster-level details.
.. seealso::
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/emr.html#EMR.Client.describe_cluster
:return: response
"""
emr_client = self.hook.conn
self.log.info("Poking cluster %s", self.job_flow_id)
response = emr_client.describe_cluster(ClusterId=self.job_flow_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
log_uri=get_log_uri(cluster=response),
)
return response
@staticmethod
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from response dictionary.
:param response: response from AWS API
:return: current state of the cluster
"""
return response["Cluster"]["Status"]["State"]
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
cluster_status = response["Cluster"]["Status"]
state_change_reason = cluster_status.get("StateChangeReason")
if state_change_reason:
return (
f"for code: {state_change_reason.get('Code', 'No code')} "
f"with message {state_change_reason.get('Message', 'Unknown')}"
)
return None
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
elif not self.poke(context):
self.defer(
timeout=timedelta(seconds=self.poke_interval * self.max_attempts),
trigger=EmrTerminateJobFlowTrigger(
job_flow_id=self.job_flow_id,
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
waiter_delay=int(self.poke_interval),
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
self.log.info("Job completed.")
class EmrStepSensor(EmrBaseSensor):
"""
Poll the state of the step until it reaches any of the target states; raise AirflowException on failure.
    With the default target states, the sensor waits for the step to be completed.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EmrStepSensor`
:param job_flow_id: job_flow_id which contains the step check the state of
:param step_id: step to check the state of
:param target_states: the target states, sensor waits until
        step reaches any of these states. In case of a deferrable sensor, it will
        wait until the step reaches a terminal state
:param failed_states: the failure states, sensor fails when
step reaches any of these states
:param max_attempts: Maximum number of tries before failing
:param deferrable: Run sensor in the deferrable mode.
"""
template_fields: Sequence[str] = ("job_flow_id", "step_id", "target_states", "failed_states")
template_ext: Sequence[str] = ()
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
job_flow_id: str,
step_id: str,
target_states: Iterable[str] | None = None,
failed_states: Iterable[str] | None = None,
max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.job_flow_id = job_flow_id
self.step_id = step_id
self.target_states = target_states or ["COMPLETED"]
self.failed_states = failed_states or ["CANCELLED", "FAILED", "INTERRUPTED"]
self.max_attempts = max_attempts
self.deferrable = deferrable
def get_emr_response(self, context: Context) -> dict[str, Any]:
"""
Make an API call with boto3 and get details about the cluster step.
.. seealso::
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/emr.html#EMR.Client.describe_step
:return: response
"""
emr_client = self.hook.conn
self.log.info("Poking step %s on cluster %s", self.step_id, self.job_flow_id)
response = emr_client.describe_step(ClusterId=self.job_flow_id, StepId=self.step_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.job_flow_id,
log_uri=get_log_uri(emr_client=emr_client, job_flow_id=self.job_flow_id),
)
return response
@staticmethod
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from response dictionary.
:param response: response from AWS API
:return: execution state of the cluster step
"""
return response["Step"]["Status"]["State"]
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
fail_details = response["Step"]["Status"].get("FailureDetails")
if fail_details:
return (
f"for reason {fail_details.get('Reason')} "
f"with message {fail_details.get('Message')} and log file {fail_details.get('LogFile')}"
)
return None
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
elif not self.poke(context):
self.defer(
timeout=timedelta(seconds=self.max_attempts * self.poke_interval),
trigger=EmrStepSensorTrigger(
job_flow_id=self.job_flow_id,
step_id=self.step_id,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
self.log.info("Job completed.")
| 22,909 | 34.62986 | 123 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/glue.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue import GlueJobHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlueJobSensor(BaseSensorOperator):
"""
    Waits for an AWS Glue Job to reach any of the statuses below:
    'FAILED', 'STOPPED', 'SUCCEEDED'
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:GlueJobSensor`
:param job_name: The AWS Glue Job unique name
:param run_id: The AWS Glue current running job identifier
:param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False)
"""
template_fields: Sequence[str] = ("job_name", "run_id")
def __init__(
self,
*,
job_name: str,
run_id: str,
verbose: bool = False,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.job_name = job_name
self.run_id = run_id
self.verbose = verbose
self.aws_conn_id = aws_conn_id
self.success_states: list[str] = ["SUCCEEDED"]
self.errored_states: list[str] = ["FAILED", "STOPPED", "TIMEOUT"]
self.next_log_tokens = GlueJobHook.LogContinuationTokens()
@cached_property
def hook(self):
return GlueJobHook(aws_conn_id=self.aws_conn_id)
def poke(self, context: Context):
self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id)
job_state = self.hook.get_job_state(job_name=self.job_name, run_id=self.run_id)
try:
if job_state in self.success_states:
self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state)
return True
elif job_state in self.errored_states:
job_error_message = "Exiting Job %s Run State: %s", self.run_id, job_state
self.log.info(job_error_message)
raise AirflowException(job_error_message)
else:
return False
finally:
if self.verbose:
self.hook.print_job_logs(
job_name=self.job_name,
run_id=self.run_id,
continuation_tokens=self.next_log_tokens,
)
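# --- Usage sketch (illustrative only) ---
# A hypothetical pairing of GlueJobOperator (from the provider's operators.glue
# module) with GlueJobSensor inside a DAG; task ids and the job name are
# placeholder assumptions.
#
#   submit_job = GlueJobOperator(
#       task_id="submit_glue_job",
#       job_name="example-glue-job",
#       wait_for_completion=False,
#   )
#   wait_for_job = GlueJobSensor(
#       task_id="wait_for_glue_job",
#       job_name="example-glue-job",
#       run_id=submit_job.output,
#       verbose=True,
#   )
#   submit_job >> wait_for_job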
| 3,319 | 35.483516 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/sagemaker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import LogState, SageMakerHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class SageMakerBaseSensor(BaseSensorOperator):
"""
Contains general sensor behavior for SageMaker.
Subclasses should implement get_sagemaker_response() and state_from_response() methods.
    Subclasses should also implement non_terminal_states() and failed_states() methods.
"""
ui_color = "#ededed"
def __init__(self, *, aws_conn_id: str = "aws_default", resource_type: str = "job", **kwargs):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.resource_type = resource_type # only used for logs, to say what kind of resource we are sensing
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> SageMakerHook:
"""Get SageMakerHook."""
return self.hook
@cached_property
def hook(self) -> SageMakerHook:
return SageMakerHook(aws_conn_id=self.aws_conn_id)
def poke(self, context: Context):
response = self.get_sagemaker_response()
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
self.log.info("Bad HTTP response: %s", response)
return False
state = self.state_from_response(response)
self.log.info("%s currently %s", self.resource_type, state)
if state in self.non_terminal_states():
return False
if state in self.failed_states():
failed_reason = self.get_failed_reason_from_response(response)
raise AirflowException(
f"Sagemaker {self.resource_type} failed for the following reason: {failed_reason}"
)
return True
def non_terminal_states(self) -> set[str]:
"""Placeholder for returning states with should not terminate."""
raise NotImplementedError("Please implement non_terminal_states() in subclass")
def failed_states(self) -> set[str]:
"""Placeholder for returning states with are considered failed."""
raise NotImplementedError("Please implement failed_states() in subclass")
def get_sagemaker_response(self) -> dict:
"""Placeholder for checking status of a SageMaker task."""
raise NotImplementedError("Please implement get_sagemaker_response() in subclass")
def get_failed_reason_from_response(self, response: dict) -> str:
"""Placeholder for extracting the reason for failure from an AWS response."""
return "Unknown"
def state_from_response(self, response: dict) -> str:
"""Placeholder for extracting the state from an AWS response."""
raise NotImplementedError("Please implement state_from_response() in subclass")
class SageMakerEndpointSensor(SageMakerBaseSensor):
"""
Poll the endpoint state until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerEndpointSensor`
:param endpoint_name: Name of the endpoint instance to watch.
"""
template_fields: Sequence[str] = ("endpoint_name",)
template_ext: Sequence[str] = ()
def __init__(self, *, endpoint_name, **kwargs):
super().__init__(**kwargs)
self.endpoint_name = endpoint_name
def non_terminal_states(self):
return SageMakerHook.endpoint_non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
self.log.info("Poking Sagemaker Endpoint %s", self.endpoint_name)
return self.hook.describe_endpoint(self.endpoint_name)
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["EndpointStatus"]
class SageMakerTransformSensor(SageMakerBaseSensor):
"""
Poll the transform job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerTransformSensor`
:param job_name: Name of the transform job to watch.
"""
template_fields: Sequence[str] = ("job_name",)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name: str, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
self.log.info("Poking Sagemaker Transform Job %s", self.job_name)
return self.hook.describe_transform_job(self.job_name)
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["TransformJobStatus"]
class SageMakerTuningSensor(SageMakerBaseSensor):
"""
Poll the tuning state until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerTuningSensor`
:param job_name: Name of the tuning instance to watch.
"""
template_fields: Sequence[str] = ("job_name",)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name: str, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
self.log.info("Poking Sagemaker Tuning Job %s", self.job_name)
return self.hook.describe_tuning_job(self.job_name)
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["HyperParameterTuningJobStatus"]
class SageMakerTrainingSensor(SageMakerBaseSensor):
"""
Poll the training job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerTrainingSensor`
:param job_name: Name of the training job to watch.
:param print_log: Prints the cloudwatch log if True; Defaults to True.
"""
template_fields: Sequence[str] = ("job_name",)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name, print_log=True, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
self.print_log = print_log
self.positions = {}
self.stream_names = []
self.instance_count: int | None = None
self.state: int | None = None
self.last_description = None
self.last_describe_job_call = None
self.log_resource_inited = False
def init_log_resource(self, hook: SageMakerHook) -> None:
"""Set tailing LogState for associated training job."""
description = hook.describe_training_job(self.job_name)
self.instance_count = description["ResourceConfig"]["InstanceCount"]
status = description["TrainingJobStatus"]
job_already_completed = status not in self.non_terminal_states()
self.state = LogState.COMPLETE if job_already_completed else LogState.TAILING
self.last_description = description
self.last_describe_job_call = time.monotonic()
self.log_resource_inited = True
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
if self.print_log:
if not self.log_resource_inited:
self.init_log_resource(self.hook)
(
self.state,
self.last_description,
self.last_describe_job_call,
) = self.hook.describe_training_job_with_log(
self.job_name,
self.positions,
self.stream_names,
self.instance_count,
self.state,
self.last_description,
self.last_describe_job_call,
)
else:
self.last_description = self.hook.describe_training_job(self.job_name)
status = self.state_from_response(self.last_description)
if (status not in self.non_terminal_states()) and (status not in self.failed_states()):
billable_time = (
self.last_description["TrainingEndTime"] - self.last_description["TrainingStartTime"]
) * self.last_description["ResourceConfig"]["InstanceCount"]
self.log.info("Billable seconds: %s", (int(billable_time.total_seconds()) + 1))
return self.last_description
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["TrainingJobStatus"]
class SageMakerPipelineSensor(SageMakerBaseSensor):
"""
Poll the pipeline until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerPipelineSensor`
:param pipeline_exec_arn: ARN of the pipeline to watch.
:param verbose: Whether to print steps details while waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
"""
template_fields: Sequence[str] = ("pipeline_exec_arn",)
def __init__(self, *, pipeline_exec_arn: str, verbose: bool = True, **kwargs):
super().__init__(resource_type="pipeline", **kwargs)
self.pipeline_exec_arn = pipeline_exec_arn
self.verbose = verbose
def non_terminal_states(self) -> set[str]:
return SageMakerHook.pipeline_non_terminal_states
def failed_states(self) -> set[str]:
return SageMakerHook.failed_states
def get_sagemaker_response(self) -> dict:
self.log.info("Poking Sagemaker Pipeline Execution %s", self.pipeline_exec_arn)
return self.hook.describe_pipeline_exec(self.pipeline_exec_arn, self.verbose)
def state_from_response(self, response: dict) -> str:
return response["PipelineExecutionStatus"]
class SageMakerAutoMLSensor(SageMakerBaseSensor):
"""
Poll the auto ML job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerAutoMLSensor`
:param job_name: unique name of the AutoML job to watch.
"""
template_fields: Sequence[str] = ("job_name",)
def __init__(self, *, job_name: str, **kwargs):
super().__init__(resource_type="autoML job", **kwargs)
self.job_name = job_name
def non_terminal_states(self) -> set[str]:
return SageMakerHook.non_terminal_states
def failed_states(self) -> set[str]:
return SageMakerHook.failed_states
def get_sagemaker_response(self) -> dict:
self.log.info("Poking Sagemaker AutoML Execution %s", self.job_name)
return self.hook._describe_auto_ml_job(self.job_name)
def state_from_response(self, response: dict) -> str:
return response["AutoMLJobStatus"]
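# Illustrative usage sketch (not part of this module): polling a training job by
# name with log tailing enabled. The job name below is a placeholder; in practice
# it usually comes from an upstream SageMaker operator via XCom.
#
#   from airflow.providers.amazon.aws.sensors.sagemaker import SageMakerTrainingSensor
#
#   wait_for_training = SageMakerTrainingSensor(
#       task_id="wait_for_training",
#       job_name="my-training-job",  # hypothetical training job name
#       print_log=True,              # tail the CloudWatch training log while waiting
#       poke_interval=60,
#   )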
| 12,794 | 36.632353 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/redshift_cluster.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from deprecated import deprecated
from airflow.providers.amazon.aws.hooks.redshift_cluster import RedshiftHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedshiftClusterSensor(BaseSensorOperator):
"""
Waits for a Redshift cluster to reach a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RedshiftClusterSensor`
:param cluster_identifier: The identifier for the cluster being pinged.
:param target_status: The cluster status desired.
"""
template_fields: Sequence[str] = ("cluster_identifier", "target_status")
def __init__(
self,
*,
cluster_identifier: str,
target_status: str = "available",
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.target_status = target_status
self.aws_conn_id = aws_conn_id
def poke(self, context: Context):
current_status = self.hook.cluster_status(self.cluster_identifier)
self.log.info(
"Poked cluster %s for status '%s', found status '%s'",
self.cluster_identifier,
self.target_status,
current_status,
)
return current_status == self.target_status
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> RedshiftHook:
"""Create and return a RedshiftHook."""
return self.hook
@cached_property
def hook(self) -> RedshiftHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
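# Illustrative usage sketch (not part of this module): waiting for a cluster to
# become available after a resume/create step. The cluster identifier is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.redshift_cluster import RedshiftClusterSensor
#
#   wait_for_cluster = RedshiftClusterSensor(
#       task_id="wait_for_cluster_available",
#       cluster_identifier="my-redshift-cluster",  # hypothetical cluster identifier
#       target_status="available",
#       poke_interval=30,
#       timeout=30 * 60,
#   )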
| 2,623 | 33.526316 | 81 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/athena.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.athena import AthenaHook
from airflow.sensors.base import BaseSensorOperator
class AthenaSensor(BaseSensorOperator):
"""
Poll the state of the Query until it reaches a terminal state; fails if the query fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:AthenaSensor`
:param query_execution_id: query_execution_id to check the state of
:param max_retries: Number of times to poll for query state before
returning the current state, defaults to None
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:param sleep_time: Time in seconds to wait between two consecutive call to
check query status on athena, defaults to 10
"""
INTERMEDIATE_STATES = (
"QUEUED",
"RUNNING",
)
FAILURE_STATES = (
"FAILED",
"CANCELLED",
)
SUCCESS_STATES = ("SUCCEEDED",)
template_fields: Sequence[str] = ("query_execution_id",)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
*,
query_execution_id: str,
max_retries: int | None = None,
aws_conn_id: str = "aws_default",
sleep_time: int = 10,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.query_execution_id = query_execution_id
self.sleep_time = sleep_time
self.max_retries = max_retries
def poke(self, context: Context) -> bool:
state = self.hook.poll_query_status(self.query_execution_id, self.max_retries, self.sleep_time)
if state in self.FAILURE_STATES:
raise AirflowException("Athena sensor failed")
if state in self.INTERMEDIATE_STATES:
return False
return True
@cached_property
def hook(self) -> AthenaHook:
"""Create and return an AthenaHook."""
return AthenaHook(self.aws_conn_id)
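# Illustrative usage sketch (not part of this module): waiting on a query started
# by an upstream task. The task id "run_query" is an assumption; it is expected to
# push the Athena query execution id to XCom.
#
#   from airflow.providers.amazon.aws.sensors.athena import AthenaSensor
#
#   wait_for_query = AthenaSensor(
#       task_id="wait_for_athena_query",
#       query_execution_id="{{ ti.xcom_pull(task_ids='run_query') }}",
#       sleep_time=10,
#       max_retries=120,
#   )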
| 3,057 | 32.604396 | 103 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from deprecated import deprecated
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.providers.amazon.aws.triggers.batch import BatchJobTrigger
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BatchSensor(BaseSensorOperator):
"""
Poll the state of the Batch Job until it reaches a terminal state; fails if the job fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchSensor`
:param job_id: Batch job_id to check the state for
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:param region_name: aws region name associated with the client
:param deferrable: Run sensor in the deferrable mode.
:param poke_interval: polling period in seconds to check for the status of the job.
:param max_retries: Number of times to poll for job state before
returning the current state.
"""
template_fields: Sequence[str] = ("job_id",)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
*,
job_id: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poke_interval: float = 5,
max_retries: int = 5,
**kwargs,
):
super().__init__(**kwargs)
self.job_id = job_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.deferrable = deferrable
self.poke_interval = poke_interval
self.max_retries = max_retries
def poke(self, context: Context) -> bool:
job_description = self.hook.get_job_description(self.job_id)
state = job_description["status"]
if state == BatchClientHook.SUCCESS_STATE:
return True
if state in BatchClientHook.INTERMEDIATE_STATES:
return False
if state == BatchClientHook.FAILURE_STATE:
raise AirflowException(f"Batch sensor failed. AWS Batch job status: {state}")
raise AirflowException(f"Batch sensor failed. Unknown AWS Batch job status: {state}")
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
else:
timeout = (
timedelta(seconds=self.max_retries * self.poke_interval + 60)
if self.max_retries
else self.execution_timeout
)
self.defer(
timeout=timeout,
trigger=BatchJobTrigger(
job_id=self.job_id,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_retries,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""
Callback for when the trigger fires - returns immediately.
        Relies on the trigger to throw an exception; otherwise it assumes execution was successful.
"""
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
job_id = event["job_id"]
self.log.info("Batch Job %s complete", job_id)
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> BatchClientHook:
"""Create and return a BatchClientHook."""
return self.hook
@cached_property
def hook(self) -> BatchClientHook:
return BatchClientHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
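# Illustrative usage sketch (not part of this module): waiting on a submitted Batch
# job in deferrable mode so the wait happens on the triggerer instead of a worker
# slot. The upstream task id "submit_batch_job" (assumed to push the job id) is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.batch import BatchSensor
#
#   wait_for_batch_job = BatchSensor(
#       task_id="wait_for_batch_job",
#       job_id="{{ ti.xcom_pull(task_ids='submit_batch_job') }}",
#       deferrable=True,
#       poke_interval=30,
#       max_retries=120,
#   )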
class BatchComputeEnvironmentSensor(BaseSensorOperator):
"""
Poll the state of the Batch environment until it reaches a terminal state; fails if the environment fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchComputeEnvironmentSensor`
:param compute_environment: Batch compute environment name
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:param region_name: aws region name associated with the client
"""
template_fields: Sequence[str] = ("compute_environment",)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
compute_environment: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.compute_environment = compute_environment
self.aws_conn_id = aws_conn_id
self.region_name = region_name
@cached_property
def hook(self) -> BatchClientHook:
"""Create and return a BatchClientHook."""
return BatchClientHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
def poke(self, context: Context) -> bool:
response = self.hook.client.describe_compute_environments( # type: ignore[union-attr]
computeEnvironments=[self.compute_environment]
)
if len(response["computeEnvironments"]) == 0:
raise AirflowException(f"AWS Batch compute environment {self.compute_environment} not found")
status = response["computeEnvironments"][0]["status"]
if status in BatchClientHook.COMPUTE_ENVIRONMENT_TERMINAL_STATUS:
return True
if status in BatchClientHook.COMPUTE_ENVIRONMENT_INTERMEDIATE_STATUS:
return False
raise AirflowException(
f"AWS Batch compute environment failed. AWS Batch compute environment status: {status}"
)
class BatchJobQueueSensor(BaseSensorOperator):
"""
Poll the state of the Batch job queue until it reaches a terminal state; fails if the queue fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchJobQueueSensor`
:param job_queue: Batch job queue name
:param treat_non_existing_as_deleted: If True, a non-existing Batch job queue is considered as a deleted
queue and as such a valid case.
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:param region_name: aws region name associated with the client
"""
template_fields: Sequence[str] = ("job_queue",)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
job_queue: str,
treat_non_existing_as_deleted: bool = False,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.job_queue = job_queue
self.treat_non_existing_as_deleted = treat_non_existing_as_deleted
self.aws_conn_id = aws_conn_id
self.region_name = region_name
@cached_property
def hook(self) -> BatchClientHook:
"""Create and return a BatchClientHook."""
return BatchClientHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
def poke(self, context: Context) -> bool:
response = self.hook.client.describe_job_queues( # type: ignore[union-attr]
jobQueues=[self.job_queue]
)
if len(response["jobQueues"]) == 0:
if self.treat_non_existing_as_deleted:
return True
else:
raise AirflowException(f"AWS Batch job queue {self.job_queue} not found")
status = response["jobQueues"][0]["status"]
if status in BatchClientHook.JOB_QUEUE_TERMINAL_STATUS:
return True
if status in BatchClientHook.JOB_QUEUE_INTERMEDIATE_STATUS:
return False
raise AirflowException(f"AWS Batch job queue failed. AWS Batch job queue status: {status}")
| 9,160 | 34.370656 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/lambda_function.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.amazon.aws.hooks.lambda_function import LambdaHook
from airflow.providers.amazon.aws.utils import trim_none_values
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.exceptions import AirflowException
from airflow.sensors.base import BaseSensorOperator
class LambdaFunctionStateSensor(BaseSensorOperator):
"""
Poll the state of the Lambda until it reaches a target state; fails if the query fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:LambdaFunctionStateSensor`
:param function_name: The name of the AWS Lambda function, version, or alias.
:param qualifier: Specify a version or alias to get details about a published version of the function.
    :param target_states: The Lambda states desired. (default: ['Active'])
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
FAILURE_STATES = ("Failed",)
template_fields: Sequence[str] = (
"function_name",
"qualifier",
)
def __init__(
self,
*,
function_name: str,
qualifier: str | None = None,
        target_states: list | None = None,
aws_conn_id: str = "aws_default",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.function_name = function_name
self.qualifier = qualifier
        self.target_states = target_states or ["Active"]
def poke(self, context: Context) -> bool:
get_function_args = {
"FunctionName": self.function_name,
"Qualifier": self.qualifier,
}
state = self.hook.conn.get_function(**trim_none_values(get_function_args))["Configuration"]["State"]
if state in self.FAILURE_STATES:
raise AirflowException(
"Lambda function state sensor failed because the Lambda is in a failed state"
)
return state in self.target_states
@cached_property
def hook(self) -> LambdaHook:
return LambdaHook(aws_conn_id=self.aws_conn_id)
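# Illustrative usage sketch (not part of this module): blocking until a freshly
# deployed function leaves the "Pending" state. The function name is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.lambda_function import LambdaFunctionStateSensor
#
#   wait_for_lambda_active = LambdaFunctionStateSensor(
#       task_id="wait_for_lambda_active",
#       function_name="my-function",  # hypothetical Lambda function name
#       target_states=["Active"],
#       poke_interval=10,
#   )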
| 3,027 | 34.209302 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/ecs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
import boto3
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.ecs import (
EcsClusterStates,
EcsHook,
EcsTaskDefinitionStates,
EcsTaskStates,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
DEFAULT_CONN_ID: str = "aws_default"
def _check_failed(current_state, target_state, failure_states):
if (current_state != target_state) and (current_state in failure_states):
raise AirflowException(
f"Terminal state reached. Current state: {current_state}, Expected state: {target_state}"
)
class EcsBaseSensor(BaseSensorOperator):
"""Contains general sensor behavior for Elastic Container Service."""
def __init__(self, *, aws_conn_id: str | None = DEFAULT_CONN_ID, region: str | None = None, **kwargs):
self.aws_conn_id = aws_conn_id
self.region = region
super().__init__(**kwargs)
@cached_property
def hook(self) -> EcsHook:
"""Create and return an EcsHook."""
return EcsHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
@cached_property
def client(self) -> boto3.client:
"""Create and return an EcsHook client."""
return self.hook.conn
class EcsClusterStateSensor(EcsBaseSensor):
"""
Poll the cluster state until it reaches a terminal state; raises AirflowException with the failure reason.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/sensor:EcsClusterStateSensor`
:param cluster_name: The name of your cluster.
:param target_state: Success state to watch for. (Default: "ACTIVE")
:param failure_states: Fail if any of these states are reached before the
Success State. (Default: "FAILED" or "INACTIVE")
"""
template_fields: Sequence[str] = ("cluster_name", "target_state", "failure_states")
def __init__(
self,
*,
cluster_name: str,
target_state: EcsClusterStates | None = EcsClusterStates.ACTIVE,
failure_states: set[EcsClusterStates] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.target_state = target_state
self.failure_states = failure_states or {EcsClusterStates.FAILED, EcsClusterStates.INACTIVE}
def poke(self, context: Context):
cluster_state = EcsClusterStates(self.hook.get_cluster_state(cluster_name=self.cluster_name))
self.log.info("Cluster state: %s, waiting for: %s", cluster_state, self.target_state)
_check_failed(cluster_state, self.target_state, self.failure_states)
return cluster_state == self.target_state
class EcsTaskDefinitionStateSensor(EcsBaseSensor):
"""
Poll task definition until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/sensor:EcsTaskDefinitionStateSensor`
:param task_definition: The family for the latest ACTIVE revision, family and
revision (family:revision ) for a specific revision in the family, or full
Amazon Resource Name (ARN) of the task definition.
:param target_state: Success state to watch for. (Default: "ACTIVE")
"""
template_fields: Sequence[str] = ("task_definition", "target_state", "failure_states")
def __init__(
self,
*,
task_definition: str,
target_state: EcsTaskDefinitionStates | None = EcsTaskDefinitionStates.ACTIVE,
**kwargs,
):
super().__init__(**kwargs)
self.task_definition = task_definition
self.target_state = target_state
# There are only two possible states, so set failure_state to whatever is not the target_state
self.failure_states = {
(
EcsTaskDefinitionStates.INACTIVE
if target_state == EcsTaskDefinitionStates.ACTIVE
else EcsTaskDefinitionStates.ACTIVE
)
}
def poke(self, context: Context):
task_definition_state = EcsTaskDefinitionStates(
self.hook.get_task_definition_state(task_definition=self.task_definition)
)
self.log.info("Task Definition state: %s, waiting for: %s", task_definition_state, self.target_state)
        _check_failed(task_definition_state, self.target_state, self.failure_states)
return task_definition_state == self.target_state
class EcsTaskStateSensor(EcsBaseSensor):
"""
Poll the task state until it reaches a terminal state; raises AirflowException with the failure reason.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/sensor:EcsTaskStateSensor`
:param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task.
:param task: The task ID or full ARN of the task to poll.
:param target_state: Success state to watch for. (Default: "ACTIVE")
:param failure_states: Fail if any of these states are reached before
the Success State. (Default: "STOPPED")
"""
template_fields: Sequence[str] = ("cluster", "task", "target_state", "failure_states")
def __init__(
self,
*,
cluster: str,
task: str,
target_state: EcsTaskStates | None = EcsTaskStates.RUNNING,
failure_states: set[EcsTaskStates] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.cluster = cluster
self.task = task
self.target_state = target_state
self.failure_states = failure_states or {EcsTaskStates.STOPPED}
def poke(self, context: Context):
task_state = EcsTaskStates(self.hook.get_task_state(cluster=self.cluster, task=self.task))
self.log.info("Task state: %s, waiting for: %s", task_state, self.target_state)
_check_failed(task_state, self.target_state, self.failure_states)
return task_state == self.target_state
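# Illustrative usage sketch (not part of this module): waiting for a task to reach
# RUNNING while failing fast if it stops. Cluster name and task ARN are placeholders.
#
#   from airflow.providers.amazon.aws.hooks.ecs import EcsTaskStates
#   from airflow.providers.amazon.aws.sensors.ecs import EcsTaskStateSensor
#
#   wait_for_task_running = EcsTaskStateSensor(
#       task_id="wait_for_task_running",
#       cluster="my-ecs-cluster",
#       task="arn:aws:ecs:us-east-1:123456789012:task/my-ecs-cluster/0123456789abcdef",
#       target_state=EcsTaskStates.RUNNING,
#       failure_states={EcsTaskStates.STOPPED},
#       poke_interval=15,
#   )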
| 7,063 | 36.978495 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/cloud_formation.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains sensors for AWS CloudFormation."""
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.providers.amazon.aws.hooks.cloud_formation import CloudFormationHook
from airflow.sensors.base import BaseSensorOperator
class CloudFormationCreateStackSensor(BaseSensorOperator):
"""
Waits for a stack to be created successfully on AWS CloudFormation.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:CloudFormationCreateStackSensor`
:param stack_name: The name of the stack to wait for (templated)
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:param poke_interval: Time in seconds that the job should wait between each try
"""
template_fields: Sequence[str] = ("stack_name",)
ui_color = "#C5CAE9"
def __init__(self, *, stack_name, aws_conn_id="aws_default", region_name=None, **kwargs):
super().__init__(**kwargs)
self.stack_name = stack_name
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def poke(self, context: Context):
stack_status = self.hook.get_stack_status(self.stack_name)
if stack_status == "CREATE_COMPLETE":
return True
if stack_status in ("CREATE_IN_PROGRESS", None):
return False
raise ValueError(f"Stack {self.stack_name} in bad state: {stack_status}")
@cached_property
def hook(self) -> CloudFormationHook:
"""Create and return a CloudFormationHook."""
return CloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class CloudFormationDeleteStackSensor(BaseSensorOperator):
"""
Waits for a stack to be deleted successfully on AWS CloudFormation.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:CloudFormationDeleteStackSensor`
:param stack_name: The name of the stack to wait for (templated)
:param aws_conn_id: ID of the Airflow connection where credentials and extra configuration are
stored
:param poke_interval: Time in seconds that the job should wait between each try
"""
template_fields: Sequence[str] = ("stack_name",)
ui_color = "#C5CAE9"
def __init__(
self,
*,
stack_name: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.stack_name = stack_name
def poke(self, context: Context):
stack_status = self.hook.get_stack_status(self.stack_name)
if stack_status in ("DELETE_COMPLETE", None):
return True
if stack_status == "DELETE_IN_PROGRESS":
return False
raise ValueError(f"Stack {self.stack_name} in bad state: {stack_status}")
@cached_property
def hook(self) -> CloudFormationHook:
"""Create and return a CloudFormationHook."""
return CloudFormationHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
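# Illustrative usage sketch (not part of this module): pairing the two sensors with
# create/delete stack steps. The stack name is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.cloud_formation import (
#       CloudFormationCreateStackSensor,
#       CloudFormationDeleteStackSensor,
#   )
#
#   wait_for_stack_create = CloudFormationCreateStackSensor(
#       task_id="wait_for_stack_create",
#       stack_name="my-stack",  # hypothetical stack name
#       poke_interval=30,
#   )
#   wait_for_stack_delete = CloudFormationDeleteStackSensor(
#       task_id="wait_for_stack_delete",
#       stack_name="my-stack",
#       poke_interval=30,
#   )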
| 4,150 | 36.736364 | 98 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/ec2.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.ec2 import EC2Hook
from airflow.providers.amazon.aws.triggers.ec2 import EC2StateSensorTrigger
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class EC2InstanceStateSensor(BaseSensorOperator):
"""
Poll the state of the AWS EC2 instance until the instance reaches the target state.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EC2InstanceStateSensor`
:param target_state: target state of instance
:param instance_id: id of the AWS EC2 instance
:param region_name: (optional) aws region name associated with the client
:param deferrable: if True, the sensor will run in deferrable mode
"""
template_fields: Sequence[str] = ("target_state", "instance_id", "region_name")
ui_color = "#cc8811"
ui_fgcolor = "#ffffff"
valid_states = ["running", "stopped", "terminated"]
def __init__(
self,
*,
target_state: str,
instance_id: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
if target_state not in self.valid_states:
raise ValueError(f"Invalid target_state: {target_state}")
super().__init__(**kwargs)
self.target_state = target_state
self.instance_id = instance_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.deferrable = deferrable
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=EC2StateSensorTrigger(
instance_id=self.instance_id,
target_state=self.target_state,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
poll_interval=int(self.poke_interval),
),
method_name="execute_complete",
)
else:
super().execute(context=context)
@cached_property
def hook(self):
return EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
def poke(self, context: Context):
instance_state = self.hook.get_instance_state(instance_id=self.instance_id)
self.log.info("instance state: %s", instance_state)
return instance_state == self.target_state
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error: {event}")
return
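# Illustrative usage sketch (not part of this module): waiting for an instance to
# reach "running", deferring the wait to the triggerer. The instance id is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.ec2 import EC2InstanceStateSensor
#
#   wait_for_instance = EC2InstanceStateSensor(
#       task_id="wait_for_instance_running",
#       instance_id="i-0123456789abcdef0",  # hypothetical instance id
#       target_state="running",             # must be one of: running, stopped, terminated
#       deferrable=True,
#   )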
| 3,721 | 36.59596 | 94 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/sqs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Reads and then deletes the message from SQS queue."""
from __future__ import annotations
import json
from functools import cached_property
from typing import TYPE_CHECKING, Any, Collection, Literal, Sequence
from deprecated import deprecated
from jsonpath_ng import parse
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import BaseAwsConnection
from airflow.providers.amazon.aws.hooks.sqs import SqsHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class SqsSensor(BaseSensorOperator):
"""
Get messages from an Amazon SQS queue and then delete the messages from the queue.
If deletion of messages fails, an AirflowException is thrown. Otherwise, the messages
are pushed through XCom with the key ``messages``.
    By default, the sensor performs one and only one SQS call per poke, which limits the result to
a maximum of 10 messages. However, the total number of SQS API calls per poke can be controlled
by num_batches param.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SqsSensor`
:param aws_conn_id: AWS connection id
:param sqs_queue: The SQS queue url (templated)
:param max_messages: The maximum number of messages to retrieve for each poke (templated)
:param num_batches: The number of times the sensor will call the SQS API to receive messages (default: 1)
:param wait_time_seconds: The time in seconds to wait for receiving messages (default: 1 second)
:param visibility_timeout: Visibility timeout, a period of time during which
Amazon SQS prevents other consumers from receiving and processing the message.
:param message_filtering: Specified how received messages should be filtered. Supported options are:
`None` (no filtering, default), `'literal'` (message Body literal match) or `'jsonpath'`
(message Body filtered using a JSONPath expression).
You may add further methods by overriding the relevant class methods.
:param message_filtering_match_values: Optional value/s for the message filter to match on.
For example, with literal matching, if a message body matches any of the specified values
then it is included. For JSONPath matching, the result of the JSONPath expression is used
and may match any of the specified values.
:param message_filtering_config: Additional configuration to pass to the message filter.
For example with JSONPath filtering you can pass a JSONPath expression string here,
such as `'foo[*].baz'`. Messages with a Body which does not match are ignored.
:param delete_message_on_reception: Default to `True`, the messages are deleted from the queue
as soon as being consumed. Otherwise, the messages remain in the queue after consumption and
should be deleted manually.
"""
template_fields: Sequence[str] = ("sqs_queue", "max_messages", "message_filtering_config")
def __init__(
self,
*,
sqs_queue,
aws_conn_id: str = "aws_default",
max_messages: int = 5,
num_batches: int = 1,
wait_time_seconds: int = 1,
visibility_timeout: int | None = None,
message_filtering: Literal["literal", "jsonpath"] | None = None,
message_filtering_match_values: Any = None,
message_filtering_config: Any = None,
delete_message_on_reception: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.sqs_queue = sqs_queue
self.aws_conn_id = aws_conn_id
self.max_messages = max_messages
self.num_batches = num_batches
self.wait_time_seconds = wait_time_seconds
self.visibility_timeout = visibility_timeout
self.message_filtering = message_filtering
self.delete_message_on_reception = delete_message_on_reception
if message_filtering_match_values is not None:
if not isinstance(message_filtering_match_values, set):
message_filtering_match_values = set(message_filtering_match_values)
self.message_filtering_match_values = message_filtering_match_values
if self.message_filtering == "literal":
if self.message_filtering_match_values is None:
raise TypeError("message_filtering_match_values must be specified for literal matching")
self.message_filtering_config = message_filtering_config
def poll_sqs(self, sqs_conn: BaseAwsConnection) -> Collection:
"""
Poll SQS queue to retrieve messages.
:param sqs_conn: SQS connection
:return: A list of messages retrieved from SQS
"""
self.log.info("SqsSensor checking for message on queue: %s", self.sqs_queue)
receive_message_kwargs = {
"QueueUrl": self.sqs_queue,
"MaxNumberOfMessages": self.max_messages,
"WaitTimeSeconds": self.wait_time_seconds,
}
if self.visibility_timeout is not None:
receive_message_kwargs["VisibilityTimeout"] = self.visibility_timeout
response = sqs_conn.receive_message(**receive_message_kwargs)
if "Messages" not in response:
return []
messages = response["Messages"]
num_messages = len(messages)
self.log.info("Received %d messages", num_messages)
if num_messages and self.message_filtering:
messages = self.filter_messages(messages)
num_messages = len(messages)
self.log.info("There are %d messages left after filtering", num_messages)
return messages
def poke(self, context: Context):
"""
Check subscribed queue for messages and write them to xcom with the ``messages`` key.
:param context: the context object
:return: ``True`` if message is available or ``False``
"""
message_batch: list[Any] = []
# perform multiple SQS call to retrieve messages in series
for _ in range(self.num_batches):
messages = self.poll_sqs(sqs_conn=self.hook.conn)
if not len(messages):
continue
message_batch.extend(messages)
if self.delete_message_on_reception:
self.log.info("Deleting %d messages", len(messages))
entries = [
{"Id": message["MessageId"], "ReceiptHandle": message["ReceiptHandle"]}
for message in messages
]
response = self.hook.conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries)
if "Successful" not in response:
raise AirflowException(
"Delete SQS Messages failed " + str(response) + " for messages " + str(messages)
)
if not len(message_batch):
return False
context["ti"].xcom_push(key="messages", value=message_batch)
return True
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> SqsHook:
"""Create and return an SqsHook."""
return self.hook
@cached_property
def hook(self) -> SqsHook:
return SqsHook(aws_conn_id=self.aws_conn_id)
def filter_messages(self, messages):
if self.message_filtering == "literal":
return self.filter_messages_literal(messages)
if self.message_filtering == "jsonpath":
return self.filter_messages_jsonpath(messages)
else:
raise NotImplementedError("Override this method to define custom filters")
def filter_messages_literal(self, messages):
filtered_messages = []
for message in messages:
if message["Body"] in self.message_filtering_match_values:
filtered_messages.append(message)
return filtered_messages
def filter_messages_jsonpath(self, messages):
jsonpath_expr = parse(self.message_filtering_config)
filtered_messages = []
for message in messages:
body = message["Body"]
# Body is a string, deserialize to an object and then parse
body = json.loads(body)
results = jsonpath_expr.find(body)
if not results:
continue
if self.message_filtering_match_values is None:
filtered_messages.append(message)
continue
for result in results:
if result.value in self.message_filtering_match_values:
filtered_messages.append(message)
break
return filtered_messages
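# Illustrative usage sketch (not part of this module): consuming messages with
# JSONPath filtering. The queue URL, JSONPath expression and match value are placeholders.
#
#   from airflow.providers.amazon.aws.sensors.sqs import SqsSensor
#
#   read_shipped_orders = SqsSensor(
#       task_id="read_shipped_orders",
#       sqs_queue="https://sqs.us-east-1.amazonaws.com/123456789012/my-queue",
#       max_messages=10,
#       num_batches=3,
#       message_filtering="jsonpath",
#       message_filtering_config="status",           # JSONPath applied to each message body
#       message_filtering_match_values=["SHIPPED"],
#       poke_interval=30,
#   )
#   # Downstream tasks can read the matched messages from XCom, e.g.
#   #   {{ ti.xcom_pull(task_ids='read_shipped_orders', key='messages') }}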
| 9,543 | 40.859649 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/dynamodb.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.amazon.aws.hooks.dynamodb import DynamoDBHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DynamoDBValueSensor(BaseSensorOperator):
"""
Waits for an attribute value to be present for an item in a DynamoDB table.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:DynamoDBValueSensor`
:param table_name: DynamoDB table name
:param partition_key_name: DynamoDB partition key name
:param partition_key_value: DynamoDB partition key value
:param attribute_name: DynamoDB attribute name
:param attribute_value: DynamoDB attribute value
:param sort_key_name: (optional) DynamoDB sort key name
:param sort_key_value: (optional) DynamoDB sort key value
:param aws_conn_id: aws connection to use
:param region_name: aws region to use
"""
template_fields: Sequence[str] = (
"table_name",
"partition_key_name",
"partition_key_value",
"attribute_name",
"attribute_value",
"sort_key_name",
"sort_key_value",
)
def __init__(
self,
table_name: str,
partition_key_name: str,
partition_key_value: str,
attribute_name: str,
attribute_value: str,
sort_key_name: str | None = None,
sort_key_value: str | None = None,
aws_conn_id: str | None = DynamoDBHook.default_conn_name,
region_name: str | None = None,
**kwargs: Any,
):
super().__init__(**kwargs)
self.table_name = table_name
self.partition_key_name = partition_key_name
self.partition_key_value = partition_key_value
self.attribute_name = attribute_name
self.attribute_value = attribute_value
self.sort_key_name = sort_key_name
self.sort_key_value = sort_key_value
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def poke(self, context: Context) -> bool:
"""Test DynamoDB item for matching attribute value."""
key = {self.partition_key_name: self.partition_key_value}
msg = (
f"Checking table {self.table_name} for "
+ f"item Partition Key: {self.partition_key_name}={self.partition_key_value}"
)
if self.sort_key_name and self.sort_key_value:
key = {self.partition_key_name: self.partition_key_value, self.sort_key_name: self.sort_key_value}
msg += f"\nSort Key: {self.sort_key_name}={self.sort_key_value}"
msg += f"\nattribute: {self.attribute_name}={self.attribute_value}"
self.log.info(msg)
table = self.hook.conn.Table(self.table_name)
self.log.info("Table: %s", table)
self.log.info("Key: %s", key)
response = table.get_item(Key=key)
try:
self.log.info("Response: %s", response)
self.log.info("Want: %s = %s", self.attribute_name, self.attribute_value)
            self.log.info("Got: %s = %s", self.attribute_name, response["Item"][self.attribute_name])
return response["Item"][self.attribute_name] == self.attribute_value
except KeyError:
return False
@cached_property
def hook(self) -> DynamoDBHook:
"""Create and return a DynamoDBHook."""
return DynamoDBHook(self.aws_conn_id, region_name=self.region_name)
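# Illustrative usage sketch (not part of this module): waiting until an item's
# "status" attribute flips to "DONE". Table, key and attribute names are placeholders.
#
#   from airflow.providers.amazon.aws.sensors.dynamodb import DynamoDBValueSensor
#
#   wait_for_item_done = DynamoDBValueSensor(
#       task_id="wait_for_item_done",
#       table_name="jobs",
#       partition_key_name="job_id",
#       partition_key_value="job-1234",
#       attribute_name="status",
#       attribute_value="DONE",
#       poke_interval=30,
#   )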
| 4,424 | 37.478261 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/glue_crawler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlueCrawlerSensor(BaseSensorOperator):
"""
Waits for an AWS Glue crawler to reach any of the statuses below.
'FAILED', 'CANCELLED', 'SUCCEEDED'
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:GlueCrawlerSensor`
:param crawler_name: The AWS Glue crawler unique name
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
template_fields: Sequence[str] = ("crawler_name",)
def __init__(self, *, crawler_name: str, aws_conn_id: str = "aws_default", **kwargs) -> None:
super().__init__(**kwargs)
self.crawler_name = crawler_name
self.aws_conn_id = aws_conn_id
self.success_statuses = "SUCCEEDED"
self.errored_statuses = ("FAILED", "CANCELLED")
def poke(self, context: Context):
self.log.info("Poking for AWS Glue crawler: %s", self.crawler_name)
crawler_state = self.hook.get_crawler(self.crawler_name)["State"]
if crawler_state == "READY":
self.log.info("State: %s", crawler_state)
crawler_status = self.hook.get_crawler(self.crawler_name)["LastCrawl"]["Status"]
if crawler_status == self.success_statuses:
self.log.info("Status: %s", crawler_status)
return True
else:
raise AirflowException(f"Status: {crawler_status}")
else:
return False
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> GlueCrawlerHook:
"""Returns a new or pre-existing GlueCrawlerHook."""
return self.hook
@cached_property
def hook(self) -> GlueCrawlerHook:
return GlueCrawlerHook(aws_conn_id=self.aws_conn_id)
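# Illustrative usage sketch (not part of this module): waiting for a crawler run
# kicked off by an upstream task to finish. The crawler name is a placeholder.
#
#   from airflow.providers.amazon.aws.sensors.glue_crawler import GlueCrawlerSensor
#
#   wait_for_crawler = GlueCrawlerSensor(
#       task_id="wait_for_crawler",
#       crawler_name="my_crawler",  # hypothetical crawler name
#       poke_interval=60,
#   )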
| 2,951 | 36.846154 | 97 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/eks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tracking the state of Amazon EKS Clusters, Amazon EKS managed node groups, and AWS Fargate profiles."""
from __future__ import annotations
from abc import abstractmethod
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.eks import (
ClusterStates,
EksHook,
FargateProfileStates,
NodegroupStates,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
DEFAULT_CONN_ID = "aws_default"
CLUSTER_TERMINAL_STATES = frozenset({ClusterStates.ACTIVE, ClusterStates.FAILED, ClusterStates.NONEXISTENT})
FARGATE_TERMINAL_STATES = frozenset(
{
FargateProfileStates.ACTIVE,
FargateProfileStates.CREATE_FAILED,
FargateProfileStates.DELETE_FAILED,
FargateProfileStates.NONEXISTENT,
}
)
NODEGROUP_TERMINAL_STATES = frozenset(
{
NodegroupStates.ACTIVE,
NodegroupStates.CREATE_FAILED,
NodegroupStates.DELETE_FAILED,
NodegroupStates.NONEXISTENT,
}
)
UNEXPECTED_TERMINAL_STATE_MSG = (
"Terminal state reached. Current state: {current_state}, Expected state: {target_state}"
)
class EksBaseSensor(BaseSensorOperator):
"""
Base class to check various EKS states.
Subclasses need to implement get_state and get_terminal_states methods.
:param cluster_name: The name of the Cluster
:param target_state: Will return successfully when that state is reached.
:param target_state_type: The enum containing the states,
will be used to convert the target state if it has to be converted from a string
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use.
If this is None or empty then the default boto3 behaviour is used.
"""
def __init__(
self,
*,
cluster_name: str,
target_state: ClusterStates | NodegroupStates | FargateProfileStates,
target_state_type: type,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.aws_conn_id = aws_conn_id
self.region = region
self.target_state = (
target_state
if isinstance(target_state, target_state_type)
else target_state_type(str(target_state).upper())
)
@cached_property
def hook(self) -> EksHook:
return EksHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
def poke(self, context: Context) -> bool:
state = self.get_state()
self.log.info("Current state: %s", state)
if state in (self.get_terminal_states() - {self.target_state}):
# If we reach a terminal state which is not the target state:
raise AirflowException(
UNEXPECTED_TERMINAL_STATE_MSG.format(current_state=state, target_state=self.target_state)
)
return state == self.target_state
@abstractmethod
def get_state(self) -> ClusterStates | NodegroupStates | FargateProfileStates:
...
@abstractmethod
def get_terminal_states(self) -> frozenset:
...
class EksClusterStateSensor(EksBaseSensor):
"""
Check the state of an Amazon EKS Cluster until it reaches the target state or another terminal state.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EksClusterStateSensor`
:param cluster_name: The name of the Cluster to watch. (templated)
:param target_state: Target state of the Cluster. (templated)
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("cluster_name", "target_state", "aws_conn_id", "region")
ui_color = "#ff9900"
ui_fgcolor = "#232F3E"
def __init__(
self,
*,
target_state: ClusterStates = ClusterStates.ACTIVE,
**kwargs,
):
super().__init__(target_state=target_state, target_state_type=ClusterStates, **kwargs)
def get_state(self) -> ClusterStates:
return self.hook.get_cluster_state(clusterName=self.cluster_name)
def get_terminal_states(self) -> frozenset:
return CLUSTER_TERMINAL_STATES
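# A minimal usage sketch (illustrative addition, not part of the upstream module; the
# cluster name is a placeholder). Waiting inside a DAG for a cluster to become ACTIVE:
#
#     wait_for_cluster = EksClusterStateSensor(
#         task_id="wait_for_cluster",
#         cluster_name="my-eks-cluster",
#         target_state=ClusterStates.ACTIVE,
#     )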
class EksFargateProfileStateSensor(EksBaseSensor):
"""
Check the state of an AWS Fargate profile until it reaches the target state or another terminal state.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EksFargateProfileStateSensor`
:param cluster_name: The name of the Cluster which the AWS Fargate profile is attached to. (templated)
:param fargate_profile_name: The name of the Fargate profile to watch. (templated)
:param target_state: Target state of the Fargate profile. (templated)
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = (
"cluster_name",
"fargate_profile_name",
"target_state",
"aws_conn_id",
"region",
)
ui_color = "#ff9900"
ui_fgcolor = "#232F3E"
def __init__(
self,
*,
fargate_profile_name: str,
target_state: FargateProfileStates = FargateProfileStates.ACTIVE,
**kwargs,
):
super().__init__(target_state=target_state, target_state_type=FargateProfileStates, **kwargs)
self.fargate_profile_name = fargate_profile_name
def get_state(self) -> FargateProfileStates:
return self.hook.get_fargate_profile_state(
clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
)
def get_terminal_states(self) -> frozenset:
return FARGATE_TERMINAL_STATES
class EksNodegroupStateSensor(EksBaseSensor):
"""
Check the state of an EKS managed node group until it reaches the target state or another terminal state.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EksNodegroupStateSensor`
:param cluster_name: The name of the Cluster which the Nodegroup is attached to. (templated)
:param nodegroup_name: The name of the Nodegroup to watch. (templated)
:param target_state: Target state of the Nodegroup. (templated)
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = (
"cluster_name",
"nodegroup_name",
"target_state",
"aws_conn_id",
"region",
)
ui_color = "#ff9900"
ui_fgcolor = "#232F3E"
def __init__(
self,
*,
nodegroup_name: str,
target_state: NodegroupStates = NodegroupStates.ACTIVE,
**kwargs,
):
super().__init__(target_state=target_state, target_state_type=NodegroupStates, **kwargs)
self.nodegroup_name = nodegroup_name
def get_state(self) -> NodegroupStates:
return self.hook.get_nodegroup_state(clusterName=self.cluster_name, nodegroupName=self.nodegroup_name)
def get_terminal_states(self) -> frozenset:
return NODEGROUP_TERMINAL_STATES
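# A minimal usage sketch (illustrative addition; cluster and nodegroup names are
# placeholders). The sensor succeeds once the managed node group reaches ACTIVE and
# raises if it lands in another terminal state such as CREATE_FAILED:
#
#     wait_for_nodegroup = EksNodegroupStateSensor(
#         task_id="wait_for_nodegroup",
#         cluster_name="my-eks-cluster",
#         nodegroup_name="my-nodegroup",
#         target_state=NodegroupStates.ACTIVE,
#     )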
| 9,772 | 36.879845 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/rds.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowNotFoundException
from airflow.providers.amazon.aws.hooks.rds import RdsHook
from airflow.providers.amazon.aws.utils.rds import RdsDbType
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class RdsBaseSensor(BaseSensorOperator):
"""Base operator that implements common functions for all sensors."""
ui_color = "#ddbb77"
ui_fgcolor = "#ffffff"
def __init__(self, *args, aws_conn_id: str = "aws_conn_id", hook_params: dict | None = None, **kwargs):
self.hook_params = hook_params or {}
self.aws_conn_id = aws_conn_id
self.target_statuses: list[str] = []
super().__init__(*args, **kwargs)
@cached_property
def hook(self):
return RdsHook(aws_conn_id=self.aws_conn_id, **self.hook_params)
class RdsSnapshotExistenceSensor(RdsBaseSensor):
"""
Waits for RDS snapshot with a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsSnapshotExistenceSensor`
:param db_type: Type of the DB - either "instance" or "cluster"
:param db_snapshot_identifier: The identifier for the DB snapshot
:param target_statuses: Target status of snapshot
"""
template_fields: Sequence[str] = (
"db_snapshot_identifier",
"target_statuses",
)
def __init__(
self,
*,
db_type: str,
db_snapshot_identifier: str,
target_statuses: list[str] | None = None,
aws_conn_id: str = "aws_conn_id",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_type = RdsDbType(db_type)
self.db_snapshot_identifier = db_snapshot_identifier
self.target_statuses = target_statuses or ["available"]
def poke(self, context: Context):
self.log.info(
"Poking for statuses : %s\nfor snapshot %s", self.target_statuses, self.db_snapshot_identifier
)
try:
if self.db_type.value == "instance":
state = self.hook.get_db_snapshot_state(self.db_snapshot_identifier)
else:
state = self.hook.get_db_cluster_snapshot_state(self.db_snapshot_identifier)
except AirflowNotFoundException:
return False
return state in self.target_statuses
class RdsExportTaskExistenceSensor(RdsBaseSensor):
"""
Waits for RDS export task with a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsExportTaskExistenceSensor`
:param export_task_identifier: A unique identifier for the snapshot export task.
:param target_statuses: Target status of export task
"""
template_fields: Sequence[str] = (
"export_task_identifier",
"target_statuses",
)
def __init__(
self,
*,
export_task_identifier: str,
target_statuses: list[str] | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.export_task_identifier = export_task_identifier
self.target_statuses = target_statuses or [
"starting",
"in_progress",
"complete",
"canceling",
"canceled",
]
def poke(self, context: Context):
self.log.info(
"Poking for statuses : %s\nfor export task %s", self.target_statuses, self.export_task_identifier
)
try:
state = self.hook.get_export_task_state(self.export_task_identifier)
except AirflowNotFoundException:
return False
return state in self.target_statuses
class RdsDbSensor(RdsBaseSensor):
"""
Waits for an RDS instance or cluster to enter one of a number of states.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsDbSensor`
:param db_type: Type of the DB - either "instance" or "cluster" (default: 'instance')
:param db_identifier: The AWS identifier for the DB
:param target_statuses: Target status of DB
"""
template_fields: Sequence[str] = (
"db_identifier",
"db_type",
"target_statuses",
)
def __init__(
self,
*,
db_identifier: str,
db_type: RdsDbType | str = RdsDbType.INSTANCE,
target_statuses: list[str] | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_identifier = db_identifier
self.target_statuses = target_statuses or ["available"]
self.db_type = db_type
def poke(self, context: Context):
db_type = RdsDbType(self.db_type)
self.log.info(
"Poking for statuses : %s\nfor db instance %s", self.target_statuses, self.db_identifier
)
try:
if db_type == RdsDbType.INSTANCE:
state = self.hook.get_db_instance_state(self.db_identifier)
else:
state = self.hook.get_db_cluster_state(self.db_identifier)
except AirflowNotFoundException:
return False
return state in self.target_statuses
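# A minimal usage sketch (illustrative addition; the identifier is a placeholder).
# Waiting until an RDS instance is either "available" or "stopped":
#
#     wait_for_db = RdsDbSensor(
#         task_id="wait_for_db",
#         db_identifier="my-rds-instance",
#         db_type=RdsDbType.INSTANCE,
#         target_statuses=["available", "stopped"],
#     )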
__all__ = [
"RdsExportTaskExistenceSensor",
"RdsDbSensor",
"RdsSnapshotExistenceSensor",
]
| 6,434 | 32.170103 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/step_function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class StepFunctionExecutionSensor(BaseSensorOperator):
"""
    Poll the Step Function State Machine Execution until it reaches a terminal state;
    the sensor fails if the execution ends in a failure state.
    On successful completion of the execution, the sensor pushes the state machine's
    output to XCom under the key ``output``.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:StepFunctionExecutionSensor`
:param execution_arn: execution_arn to check the state of
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
INTERMEDIATE_STATES = ("RUNNING",)
FAILURE_STATES = (
"FAILED",
"TIMED_OUT",
"ABORTED",
)
SUCCESS_STATES = ("SUCCEEDED",)
template_fields: Sequence[str] = ("execution_arn",)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
*,
execution_arn: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.execution_arn = execution_arn
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def poke(self, context: Context):
execution_status = self.hook.describe_execution(self.execution_arn)
state = execution_status["status"]
output = json.loads(execution_status["output"]) if "output" in execution_status else None
if state in self.FAILURE_STATES:
raise AirflowException(f"Step Function sensor failed. State Machine Output: {output}")
if state in self.INTERMEDIATE_STATES:
return False
self.log.info("Doing xcom_push of output")
self.xcom_push(context, "output", output)
return True
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> StepFunctionHook:
"""Create and return a StepFunctionHook."""
return self.hook
@cached_property
def hook(self) -> StepFunctionHook:
return StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
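# A minimal usage sketch (illustrative addition; the ARN is a placeholder, typically
# pulled from XCom after the execution is started upstream). On success the state
# machine output is available via XCom under the key "output":
#
#     wait_for_execution = StepFunctionExecutionSensor(
#         task_id="wait_for_execution",
#         execution_arn=execution_arn,  # placeholder variable
#     )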
| 3,333 | 33.729167 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/glacier.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from enum import Enum
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class JobStatus(Enum):
"""Glacier jobs description."""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
class GlacierJobOperationSensor(BaseSensorOperator):
"""
Glacier sensor for checking job state. This operator runs only in reschedule mode.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:GlacierJobOperationSensor`
:param aws_conn_id: The reference to the AWS connection details
:param vault_name: name of Glacier vault on which job is executed
    :param job_id: the job ID returned by retrieve_inventory()
    :param poke_interval: Time in seconds that the sensor should wait in
        between each poke
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
the criteria is not yet met and it's rescheduled at a later time. Use
this mode if the time before the criteria is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
"""
template_fields: Sequence[str] = ("vault_name", "job_id")
def __init__(
self,
*,
aws_conn_id: str = "aws_default",
vault_name: str,
job_id: str,
poke_interval: int = 60 * 20,
mode: str = "reschedule",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.vault_name = vault_name
self.job_id = job_id
self.poke_interval = poke_interval
self.mode = mode
@cached_property
def hook(self):
return GlacierHook(aws_conn_id=self.aws_conn_id)
def poke(self, context: Context) -> bool:
response = self.hook.describe_job(vault_name=self.vault_name, job_id=self.job_id)
if response["StatusCode"] == JobStatus.SUCCEEDED.value:
self.log.info("Job status: %s, code status: %s", response["Action"], response["StatusCode"])
self.log.info("Job finished successfully")
return True
elif response["StatusCode"] == JobStatus.IN_PROGRESS.value:
self.log.info("Processing...")
self.log.warning("Code status: %s", response["StatusCode"])
return False
else:
raise AirflowException(
f'Sensor failed. Job status: {response["Action"]}, code status: {response["StatusCode"]}'
)
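# A minimal usage sketch (illustrative addition; the vault name and job id are
# placeholders, the job id usually being the one returned when the retrieval job
# was initiated):
#
#     wait_for_inventory = GlacierJobOperationSensor(
#         task_id="wait_for_inventory",
#         vault_name="my-glacier-vault",
#         job_id=inventory_job_id,  # placeholder variable
#     )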
| 4,115 | 38.576923 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/glue_catalog_partition.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from deprecated import deprecated
from airflow.providers.amazon.aws.hooks.glue_catalog import GlueCatalogHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlueCatalogPartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in AWS Glue Catalog.
:param table_name: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:param expression: The partition clause to wait for. This is passed as
is to the AWS Glue Catalog API's get_partitions function,
and supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``.
See https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html
#aws-glue-api-catalog-partitions-GetPartitions
:param aws_conn_id: ID of the Airflow connection where
credentials and extra configuration are stored
:param region_name: Optional aws region name (example: us-east-1). Uses region from connection
if not specified.
:param database_name: The name of the catalog database where the partitions reside.
    :param poke_interval: Time in seconds that the sensor should wait in
        between each poke
"""
template_fields: Sequence[str] = (
"database_name",
"table_name",
"expression",
)
ui_color = "#C5CAE9"
def __init__(
self,
*,
table_name: str,
expression: str = "ds='{{ ds }}'",
aws_conn_id: str = "aws_default",
region_name: str | None = None,
database_name: str = "default",
poke_interval: int = 60 * 3,
**kwargs,
):
super().__init__(poke_interval=poke_interval, **kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.table_name = table_name
self.expression = expression
self.database_name = database_name
def poke(self, context: Context):
"""Checks for existence of the partition in the AWS Glue Catalog table."""
if "." in self.table_name:
self.database_name, self.table_name = self.table_name.split(".")
self.log.info(
"Poking for table %s. %s, expression %s", self.database_name, self.table_name, self.expression
)
return self.hook.check_for_partition(self.database_name, self.table_name, self.expression)
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> GlueCatalogHook:
"""Gets the GlueCatalogHook."""
return self.hook
@cached_property
def hook(self) -> GlueCatalogHook:
return GlueCatalogHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
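# A minimal usage sketch (illustrative addition; database and table names are
# placeholders). Waiting until the partition for the logical date shows up in the
# Glue Catalog:
#
#     wait_for_partition = GlueCatalogPartitionSensor(
#         task_id="wait_for_partition",
#         database_name="analytics",
#         table_name="events",
#         expression="ds='{{ ds }}'",
#     )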
| 3,719 | 37.75 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/quicksight.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.quicksight import QuickSightHook
from airflow.providers.amazon.aws.hooks.sts import StsHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class QuickSightSensor(BaseSensorOperator):
"""
Watches for the status of an Amazon QuickSight Ingestion.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:QuickSightSensor`
:param data_set_id: ID of the dataset used in the ingestion.
:param ingestion_id: ID for the ingestion.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("data_set_id", "ingestion_id", "aws_conn_id")
def __init__(
self,
*,
data_set_id: str,
ingestion_id: str,
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.data_set_id = data_set_id
self.ingestion_id = ingestion_id
self.aws_conn_id = aws_conn_id
self.success_status = "COMPLETED"
self.errored_statuses = ("FAILED", "CANCELLED")
def poke(self, context: Context) -> bool:
"""
Pokes until the QuickSight Ingestion has successfully finished.
:param context: The task context during execution.
:return: True if it COMPLETED and False if not.
"""
self.log.info("Poking for Amazon QuickSight Ingestion ID: %s", self.ingestion_id)
aws_account_id = self.sts_hook.get_account_number()
quicksight_ingestion_state = self.quicksight_hook.get_status(
aws_account_id, self.data_set_id, self.ingestion_id
)
self.log.info("QuickSight Status: %s", quicksight_ingestion_state)
if quicksight_ingestion_state in self.errored_statuses:
error = self.quicksight_hook.get_error_info(aws_account_id, self.data_set_id, self.ingestion_id)
raise AirflowException(f"The QuickSight Ingestion failed. Error info: {error}")
return quicksight_ingestion_state == self.success_status
@cached_property
def quicksight_hook(self):
return QuickSightHook(aws_conn_id=self.aws_conn_id)
@cached_property
def sts_hook(self):
return StsHook(aws_conn_id=self.aws_conn_id)
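# A minimal usage sketch (illustrative addition; both identifiers are placeholders and
# would normally match the ingestion started by an upstream task):
#
#     wait_for_ingestion = QuickSightSensor(
#         task_id="wait_for_ingestion",
#         data_set_id="my-data-set-id",
#         ingestion_id="my-ingestion-id",
#     )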
| 3,626 | 38.857143 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import fnmatch
import os
import re
from datetime import datetime, timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Sequence, cast
from deprecated import deprecated
from airflow.configuration import conf
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.triggers.s3 import S3KeysUnchangedTrigger, S3KeyTrigger
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
class S3KeySensor(BaseSensorOperator):
"""
    Waits for one or multiple keys (file-like instances on S3) to be present in an S3 bucket.
The path is just a key/value pointer to a resource for the given S3 path.
Note: S3 does not support folders directly, and only provides key/value pairs.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:S3KeySensor`
:param bucket_key: The key(s) being waited on. Supports full s3:// style url
or relative path from root level. When it's specified as a full s3://
url, please leave bucket_name as `None`
:param bucket_name: Name of the S3 bucket. Only needed when ``bucket_key``
is not provided as a full ``s3://`` url. When specified, all the keys passed to ``bucket_key``
refers to this bucket
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:param check_fn: Function that receives the list of the S3 objects,
and returns a boolean:
- ``True``: the criteria is met
- ``False``: the criteria isn't met
        **Example**: Wait for any S3 object larger than 1 megabyte::
def check_fn(files: List) -> bool:
return any(f.get('Size', 0) > 1048576 for f in files)
:param aws_conn_id: a reference to the s3 connection
:param deferrable: Run operator in the deferrable mode
:param verify: Whether to verify SSL certificates for S3 connection.
By default, SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
"""
template_fields: Sequence[str] = ("bucket_key", "bucket_name")
def __init__(
self,
*,
bucket_key: str | list[str],
bucket_name: str | None = None,
wildcard_match: bool = False,
check_fn: Callable[..., bool] | None = None,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.check_fn = check_fn
self.aws_conn_id = aws_conn_id
self.verify = verify
self.deferrable = deferrable
def _check_key(self, key):
bucket_name, key = S3Hook.get_s3_bucket_key(self.bucket_name, key, "bucket_name", "bucket_key")
self.log.info("Poking for key : s3://%s/%s", bucket_name, key)
"""
Set variable `files` which contains a list of dict which contains only the size
If needed we might want to add other attributes later
Format: [{
'Size': int
}]
"""
if self.wildcard_match:
prefix = re.split(r"[\[\*\?]", key, 1)[0]
keys = self.hook.get_file_metadata(prefix, bucket_name)
key_matches = [k for k in keys if fnmatch.fnmatch(k["Key"], key)]
if len(key_matches) == 0:
return False
# Reduce the set of metadata to size only
files = list(map(lambda f: {"Size": f["Size"]}, key_matches))
else:
obj = self.hook.head_object(key, bucket_name)
if obj is None:
return False
files = [{"Size": obj["ContentLength"]}]
if self.check_fn is not None:
return self.check_fn(files)
return True
def poke(self, context: Context):
if isinstance(self.bucket_key, str):
return self._check_key(self.bucket_key)
else:
return all(self._check_key(key) for key in self.bucket_key)
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.deferrable:
super().execute(context)
else:
if not self.poke(context=context):
self._defer()
def _defer(self) -> None:
"""Check for a keys in s3 and defers using the triggerer."""
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=S3KeyTrigger(
bucket_name=cast(str, self.bucket_name),
bucket_key=self.bucket_key,
wildcard_match=self.wildcard_match,
aws_conn_id=self.aws_conn_id,
verify=self.verify,
poke_interval=self.poke_interval,
                should_check_fn=bool(self.check_fn),
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> bool | None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "running":
found_keys = self.check_fn(event["files"]) # type: ignore[misc]
if found_keys:
return None
else:
self._defer()
if event["status"] == "error":
raise AirflowException(event["message"])
return None
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> S3Hook:
"""Create and return an S3Hook."""
return self.hook
@cached_property
def hook(self) -> S3Hook:
return S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
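# A minimal usage sketch (illustrative addition; bucket and key pattern are placeholders).
# Wait for any ".csv" object under a prefix that is larger than 1 MiB, combining a
# wildcard match with a check_fn over the [{"Size": ...}] metadata the sensor collects:
#
#     wait_for_csv = S3KeySensor(
#         task_id="wait_for_csv",
#         bucket_name="my-bucket",
#         bucket_key="incoming/*.csv",
#         wildcard_match=True,
#         check_fn=lambda files: any(f.get("Size", 0) > 1048576 for f in files),
#     )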
@poke_mode_only
class S3KeysUnchangedSensor(BaseSensorOperator):
"""
Return True if inactivity_period has passed with no increase in the number of objects matching prefix.
Note, this sensor will not behave correctly in reschedule mode, as the state of the listed
objects in the S3 bucket will be lost between rescheduled invocations.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:S3KeysUnchangedSensor`
:param bucket_name: Name of the S3 bucket
:param prefix: The prefix being waited on. Relative path from bucket root level.
:param aws_conn_id: a reference to the s3 connection
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param inactivity_period: The total seconds of inactivity to designate
keys unchanged. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:param min_objects: The minimum number of objects needed for keys unchanged
sensor to be considered valid.
:param previous_objects: The set of object ids found during the last poke.
:param allow_delete: Should this sensor consider objects being deleted
between pokes valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:param deferrable: Run sensor in the deferrable mode
"""
template_fields: Sequence[str] = ("bucket_name", "prefix")
def __init__(
self,
*,
bucket_name: str,
prefix: str,
aws_conn_id: str = "aws_default",
verify: bool | str | None = None,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: set[str] | None = None,
allow_delete: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects or set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.deferrable = deferrable
self.aws_conn_id = aws_conn_id
self.verify = verify
self.last_activity_time: datetime | None = None
@cached_property
def hook(self):
"""Returns S3Hook."""
return S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
def is_keys_unchanged(self, current_objects: set[str]) -> bool:
"""
Check for new objects after the inactivity_period and update the sensor state accordingly.
:param current_objects: set of object ids in bucket during last poke.
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
            # New objects have arrived: reset the inactivity_seconds
            # and update previous_objects for the next poke.
self.log.info(
"New objects found at %s, resetting last_activity_time.",
os.path.join(self.bucket_name, self.prefix),
)
self.log.debug("New objects: %s", current_objects - self.previous_objects)
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
deleted_objects = self.previous_objects - current_objects
self.previous_objects = current_objects
self.last_activity_time = datetime.now()
self.log.info(
"Objects were deleted during the last poke interval. Updating the "
"file counter and resetting last_activity_time:\n%s",
deleted_objects,
)
return False
raise AirflowException(
f"Illegal behavior: objects were deleted in"
f" {os.path.join(self.bucket_name, self.prefix)} between pokes."
)
if self.last_activity_time:
self.inactivity_seconds = int((datetime.now() - self.last_activity_time).total_seconds())
else:
            # Handle the first poke, where last_activity_time is None.
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket_name, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"SUCCESS: \nSensor found %s objects at %s.\n"
"Waited at least %s seconds, with no new objects uploaded.",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return False
return False
def poke(self, context: Context):
return self.is_keys_unchanged(set(self.hook.list_keys(self.bucket_name, prefix=self.prefix)))
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger if deferrable is True."""
if not self.deferrable:
super().execute(context)
else:
if not self.poke(context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=S3KeysUnchangedTrigger(
bucket_name=self.bucket_name,
prefix=self.prefix,
inactivity_period=self.inactivity_period,
min_objects=self.min_objects,
previous_objects=self.previous_objects,
inactivity_seconds=self.inactivity_seconds,
allow_delete=self.allow_delete,
aws_conn_id=self.aws_conn_id,
verify=self.verify,
last_activity_time=self.last_activity_time,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event and event["status"] == "error":
raise AirflowException(event["message"])
return None
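# A minimal usage sketch (illustrative addition; bucket and prefix are placeholders).
# Treat an upload batch as complete once at least 10 objects exist under the prefix and
# nothing new has arrived for 30 minutes:
#
#     wait_for_batch = S3KeysUnchangedSensor(
#         task_id="wait_for_batch",
#         bucket_name="my-bucket",
#         prefix="incoming/",
#         inactivity_period=30 * 60,
#         min_objects=10,
#     )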
| 15,066 | 40.279452 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/sensors/dms.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Iterable, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.dms import DmsHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DmsTaskBaseSensor(BaseSensorOperator):
"""
    Contains general sensor behavior for a DMS task.
Subclasses should set ``target_statuses`` and ``termination_statuses`` fields.
:param replication_task_arn: AWS DMS replication task ARN
    :param aws_conn_id: aws connection to use
:param target_statuses: the target statuses, sensor waits until
the task reaches any of these states
:param termination_statuses: the termination statuses, sensor fails when
the task reaches any of these states
"""
template_fields: Sequence[str] = ("replication_task_arn",)
template_ext: Sequence[str] = ()
def __init__(
self,
replication_task_arn: str,
aws_conn_id="aws_default",
target_statuses: Iterable[str] | None = None,
termination_statuses: Iterable[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
self.replication_task_arn = replication_task_arn
self.target_statuses: Iterable[str] = target_statuses or []
self.termination_statuses: Iterable[str] = termination_statuses or []
@deprecated(reason="use `hook` property instead.")
def get_hook(self) -> DmsHook:
"""Get DmsHook."""
return self.hook
@cached_property
def hook(self) -> DmsHook:
return DmsHook(self.aws_conn_id)
def poke(self, context: Context):
status: str | None = self.hook.get_task_status(self.replication_task_arn)
if not status:
raise AirflowException(
f"Failed to read task status, task with ARN {self.replication_task_arn} not found"
)
self.log.info("DMS Replication task (%s) has status: %s", self.replication_task_arn, status)
if status in self.target_statuses:
return True
if status in self.termination_statuses:
raise AirflowException(f"Unexpected status: {status}")
return False
class DmsTaskCompletedSensor(DmsTaskBaseSensor):
"""
Pokes DMS task until it is completed.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:DmsTaskCompletedSensor`
:param replication_task_arn: AWS DMS replication task ARN
"""
template_fields: Sequence[str] = ("replication_task_arn",)
template_ext: Sequence[str] = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_statuses = ["stopped"]
self.termination_statuses = [
"creating",
"deleting",
"failed",
"failed-move",
"modifying",
"moving",
"ready",
"testing",
]
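# A minimal usage sketch (illustrative addition; the ARN is a placeholder). The sensor
# returns once the replication task reaches "stopped" and raises if it enters a
# termination status such as "failed" or "deleting":
#
#     wait_for_replication = DmsTaskCompletedSensor(
#         task_id="wait_for_replication",
#         replication_task_arn=replication_task_arn,  # placeholder variable
#     )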
| 3,984 | 32.208333 | 100 | py |
airflow | airflow-main/airflow/providers/samba/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-samba:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,530 | 35.452381 | 114 | py |
airflow | airflow-main/airflow/providers/samba/hooks/samba.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import posixpath
from functools import wraps
from shutil import copyfileobj
import smbclient
import smbprotocol.connection
from airflow.hooks.base import BaseHook
class SambaHook(BaseHook):
"""Allows for interaction with a Samba server.
The hook should be used as a context manager in order to correctly
set up a session and disconnect open connections upon exit.
:param samba_conn_id: The connection id reference.
:param share:
An optional share name. If this is unset then the "schema" field of
the connection is used in its place.
"""
conn_name_attr = "samba_conn_id"
default_conn_name = "samba_default"
conn_type = "samba"
hook_name = "Samba"
def __init__(self, samba_conn_id: str = default_conn_name, share: str | None = None) -> None:
super().__init__()
conn = self.get_connection(samba_conn_id)
if not conn.login:
self.log.info("Login not provided")
if not conn.password:
self.log.info("Password not provided")
connection_cache: dict[str, smbprotocol.connection.Connection] = {}
self._host = conn.host
self._share = share or conn.schema
self._connection_cache = connection_cache
self._conn_kwargs = {
"username": conn.login,
"password": conn.password,
"port": conn.port or 445,
"connection_cache": connection_cache,
}
def __enter__(self):
# This immediately connects to the host (which can be
# perceived as a benefit), but also help work around an issue:
#
# https://github.com/jborean93/smbprotocol/issues/109.
smbclient.register_session(self._host, **self._conn_kwargs)
return self
def __exit__(self, exc_type, exc_value, traceback):
for host, connection in self._connection_cache.items():
self.log.info("Disconnecting from %s", host)
connection.disconnect()
self._connection_cache.clear()
def _join_path(self, path):
return f"//{posixpath.join(self._host, self._share, path.lstrip('/'))}"
@wraps(smbclient.link)
def link(self, src, dst, follow_symlinks=True):
return smbclient.link(
self._join_path(src),
self._join_path(dst),
follow_symlinks=follow_symlinks,
**self._conn_kwargs,
)
@wraps(smbclient.listdir)
def listdir(self, path):
return smbclient.listdir(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.lstat)
def lstat(self, path):
return smbclient.lstat(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.makedirs)
def makedirs(self, path, exist_ok=False):
return smbclient.makedirs(self._join_path(path), exist_ok=exist_ok, **self._conn_kwargs)
@wraps(smbclient.mkdir)
def mkdir(self, path):
return smbclient.mkdir(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.open_file)
def open_file(
self,
path,
mode="r",
buffering=-1,
encoding=None,
errors=None,
newline=None,
share_access=None,
desired_access=None,
file_attributes=None,
file_type="file",
):
return smbclient.open_file(
self._join_path(path),
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
share_access=share_access,
desired_access=desired_access,
file_attributes=file_attributes,
file_type=file_type,
**self._conn_kwargs,
)
@wraps(smbclient.readlink)
def readlink(self, path):
return smbclient.readlink(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.remove)
def remove(self, path):
return smbclient.remove(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.removedirs)
def removedirs(self, path):
return smbclient.removedirs(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.rename)
def rename(self, src, dst):
return smbclient.rename(self._join_path(src), self._join_path(dst), **self._conn_kwargs)
@wraps(smbclient.replace)
def replace(self, src, dst):
return smbclient.replace(self._join_path(src), self._join_path(dst), **self._conn_kwargs)
@wraps(smbclient.rmdir)
def rmdir(self, path):
return smbclient.rmdir(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.scandir)
def scandir(self, path, search_pattern="*"):
return smbclient.scandir(
self._join_path(path),
search_pattern=search_pattern,
**self._conn_kwargs,
)
@wraps(smbclient.stat)
def stat(self, path, follow_symlinks=True):
return smbclient.stat(self._join_path(path), follow_symlinks=follow_symlinks, **self._conn_kwargs)
@wraps(smbclient.stat_volume)
def stat_volume(self, path):
return smbclient.stat_volume(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.symlink)
def symlink(self, src, dst, target_is_directory=False):
return smbclient.symlink(
self._join_path(src),
self._join_path(dst),
target_is_directory=target_is_directory,
**self._conn_kwargs,
)
@wraps(smbclient.truncate)
def truncate(self, path, length):
return smbclient.truncate(self._join_path(path), length, **self._conn_kwargs)
@wraps(smbclient.unlink)
def unlink(self, path):
return smbclient.unlink(self._join_path(path), **self._conn_kwargs)
@wraps(smbclient.utime)
def utime(self, path, times=None, ns=None, follow_symlinks=True):
return smbclient.utime(
self._join_path(path),
times=times,
ns=ns,
follow_symlinks=follow_symlinks,
**self._conn_kwargs,
)
@wraps(smbclient.walk)
def walk(self, path, topdown=True, onerror=None, follow_symlinks=False):
return smbclient.walk(
self._join_path(path),
topdown=topdown,
onerror=onerror,
follow_symlinks=follow_symlinks,
**self._conn_kwargs,
)
@wraps(smbclient.getxattr)
def getxattr(self, path, attribute, follow_symlinks=True):
return smbclient.getxattr(
self._join_path(path), attribute, follow_symlinks=follow_symlinks, **self._conn_kwargs
)
@wraps(smbclient.listxattr)
def listxattr(self, path, follow_symlinks=True):
return smbclient.listxattr(
self._join_path(path), follow_symlinks=follow_symlinks, **self._conn_kwargs
)
@wraps(smbclient.removexattr)
def removexattr(self, path, attribute, follow_symlinks=True):
return smbclient.removexattr(
self._join_path(path), attribute, follow_symlinks=follow_symlinks, **self._conn_kwargs
)
@wraps(smbclient.setxattr)
def setxattr(self, path, attribute, value, flags=0, follow_symlinks=True):
return smbclient.setxattr(
self._join_path(path),
attribute,
value,
flags=flags,
follow_symlinks=follow_symlinks,
**self._conn_kwargs,
)
def push_from_local(self, destination_filepath: str, local_filepath: str):
"""Push local file to samba server."""
with open(local_filepath, "rb") as f, self.open_file(destination_filepath, mode="wb") as g:
copyfileobj(f, g)
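# A minimal usage sketch (illustrative addition; connection id, share and paths are
# placeholders). Using the hook as a context manager registers the SMB session up front
# and guarantees that every cached connection is disconnected on exit:
#
#     with SambaHook(samba_conn_id="samba_default", share="reports") as hook:
#         hook.makedirs("2023/07", exist_ok=True)
#         hook.push_from_local("2023/07/report.csv", "/tmp/report.csv")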
| 8,454 | 32.955823 | 106 | py |
airflow | airflow-main/airflow/providers/samba/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/salesforce/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.4.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-salesforce:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,535 | 35.571429 | 119 | py |
airflow | airflow-main/airflow/providers/salesforce/operators/salesforce_apex_rest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SalesforceApexRestOperator(BaseOperator):
"""
Execute a APEX Rest API action.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SalesforceApexRestOperator`
:param endpoint: The REST endpoint for the request.
:param method: HTTP method for the request (default GET)
:param payload: A dict of parameters to send in a POST / PUT request
:param salesforce_conn_id: The :ref:`Salesforce Connection id <howto/connection:SalesforceHook>`.
"""
def __init__(
self,
*,
endpoint: str,
method: str = "GET",
payload: dict,
salesforce_conn_id: str = "salesforce_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.endpoint = endpoint
self.method = method
self.payload = payload
self.salesforce_conn_id = salesforce_conn_id
def execute(self, context: Context) -> dict:
"""
        Makes an HTTP request to an Apex REST endpoint and pushes results to XCom.
:param context: The task context during execution.
:return: Apex response
"""
result: dict = {}
sf_hook = SalesforceHook(salesforce_conn_id=self.salesforce_conn_id)
conn = sf_hook.get_conn()
execution_result = conn.apexecute(action=self.endpoint, method=self.method, data=self.payload)
if self.do_xcom_push:
result = execution_result
return result
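# A minimal usage sketch (illustrative addition; the endpoint and payload refer to a
# hypothetical Apex REST service deployed in the Salesforce org, not to anything this
# provider ships):
#
#     invoke_apex = SalesforceApexRestOperator(
#         task_id="invoke_apex",
#         endpoint="MyApexService/",
#         method="POST",
#         payload={"activity": [{"user": "12345", "action": "update page"}]},
#     )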
| 2,555 | 34.5 | 102 | py |
airflow | airflow-main/airflow/providers/salesforce/operators/bulk.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
from airflow.typing_compat import Literal
if TYPE_CHECKING:
from airflow.utils.context import Context
class SalesforceBulkOperator(BaseOperator):
"""
Execute a Salesforce Bulk API and pushes results to xcom.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SalesforceBulkOperator`
:param operation: Bulk operation to be performed
Available operations are in ['insert', 'update', 'upsert', 'delete', 'hard_delete']
:param object_name: The name of the Salesforce object
:param payload: list of dict to be passed as a batch
:param external_id_field: unique identifier field for upsert operations
:param batch_size: number of records to assign for each batch in the job
:param use_serial: Process batches in serial mode
:param salesforce_conn_id: The :ref:`Salesforce Connection id <howto/connection:SalesforceHook>`.
"""
available_operations = ("insert", "update", "upsert", "delete", "hard_delete")
def __init__(
self,
*,
operation: Literal["insert", "update", "upsert", "delete", "hard_delete"],
object_name: str,
payload: list,
external_id_field: str = "Id",
batch_size: int = 10000,
use_serial: bool = False,
salesforce_conn_id: str = "salesforce_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation = operation
self.object_name = object_name
self.payload = payload
self.external_id_field = external_id_field
self.batch_size = batch_size
self.use_serial = use_serial
self.salesforce_conn_id = salesforce_conn_id
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.object_name:
raise ValueError("The required parameter 'object_name' cannot have an empty value.")
if self.operation not in self.available_operations:
raise ValueError(
f"Operation {self.operation!r} not found! "
f"Available operations are {self.available_operations}."
)
def execute(self, context: Context):
"""
        Makes an HTTP request to the Salesforce Bulk API.
:param context: The task context during execution.
:return: API response if do_xcom_push is True
"""
sf_hook = SalesforceHook(salesforce_conn_id=self.salesforce_conn_id)
conn = sf_hook.get_conn()
result = []
if self.operation == "insert":
result = conn.bulk.__getattr__(self.object_name).insert(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "update":
result = conn.bulk.__getattr__(self.object_name).update(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "upsert":
result = conn.bulk.__getattr__(self.object_name).upsert(
data=self.payload,
external_id_field=self.external_id_field,
batch_size=self.batch_size,
use_serial=self.use_serial,
)
elif self.operation == "delete":
result = conn.bulk.__getattr__(self.object_name).delete(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
elif self.operation == "hard_delete":
result = conn.bulk.__getattr__(self.object_name).hard_delete(
data=self.payload, batch_size=self.batch_size, use_serial=self.use_serial
)
if self.do_xcom_push and result:
return result
return None
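# --- Hedged usage sketch (not part of the original module) ---
# A minimal bulk "insert" call through this operator; the connection id, the
# Account object and the payload records are illustrative assumptions.
def _example_salesforce_bulk_insert() -> SalesforceBulkOperator:
    return SalesforceBulkOperator(
        task_id="bulk_insert_accounts",
        operation="insert",
        object_name="Account",  # assumed Salesforce object
        payload=[
            {"Name": "Example Account 1"},
            {"Name": "Example Account 2"},
        ],
        batch_size=10000,
        use_serial=False,
        salesforce_conn_id="salesforce_default",
    )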
| 4,762 | 38.691667 | 101 | py |
airflow | airflow-main/airflow/providers/salesforce/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/salesforce/hooks/salesforce.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Connect to your Salesforce instance, retrieve data from it, and write that data to a file for other uses.
.. note:: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from __future__ import annotations
import logging
import time
from functools import cached_property
from typing import Any, Iterable
import pandas as pd
from requests import Session
from simple_salesforce import Salesforce, api
from airflow.hooks.base import BaseHook
log = logging.getLogger(__name__)
class SalesforceHook(BaseHook):
"""
    Creates a new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other Airflow operators to move the data into another data source.
:param conn_id: The name of the connection that has the parameters needed to connect to Salesforce.
The connection should be of type `Salesforce`.
:param session_id: The access token for a given HTTP request session.
:param session: A custom HTTP request session. This enables the use of requests Session features not
otherwise exposed by `simple_salesforce`.
.. note::
A connection to Salesforce can be created via several authentication options:
* Password: Provide Username, Password, and Security Token
* Direct Session: Provide a `session_id` and either Instance or Instance URL
* OAuth 2.0 JWT: Provide a Consumer Key and either a Private Key or Private Key File Path
* IP Filtering: Provide Username, Password, and an Organization ID
If in sandbox, enter a Domain value of 'test'.
"""
conn_name_attr = "salesforce_conn_id"
default_conn_name = "salesforce_default"
conn_type = "salesforce"
hook_name = "Salesforce"
def __init__(
self,
salesforce_conn_id: str = default_conn_name,
session_id: str | None = None,
session: Session | None = None,
) -> None:
super().__init__()
self.conn_id = salesforce_conn_id
self.session_id = session_id
self.session = session
def _get_field(self, extras: dict, field_name: str):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__salesforce__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
return extras[field_name] or None
prefixed_name = f"{backcompat_prefix}{field_name}"
return extras.get(prefixed_name) or None
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"security_token": PasswordField(lazy_gettext("Security Token"), widget=BS3PasswordFieldWidget()),
"domain": StringField(lazy_gettext("Domain"), widget=BS3TextFieldWidget()),
"consumer_key": StringField(lazy_gettext("Consumer Key"), widget=BS3TextFieldWidget()),
"private_key_file_path": PasswordField(
lazy_gettext("Private Key File Path"), widget=BS3PasswordFieldWidget()
),
"private_key": PasswordField(lazy_gettext("Private Key"), widget=BS3PasswordFieldWidget()),
"organization_id": StringField(lazy_gettext("Organization ID"), widget=BS3TextFieldWidget()),
"instance": StringField(lazy_gettext("Instance"), widget=BS3TextFieldWidget()),
"instance_url": StringField(lazy_gettext("Instance URL"), widget=BS3TextFieldWidget()),
"proxies": StringField(lazy_gettext("Proxies"), widget=BS3TextFieldWidget()),
"version": StringField(lazy_gettext("API Version"), widget=BS3TextFieldWidget()),
"client_id": StringField(lazy_gettext("Client ID"), widget=BS3TextFieldWidget()),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "extra", "host"],
"relabeling": {
"login": "Username",
},
}
@cached_property
def conn(self) -> api.Salesforce:
"""Returns a Salesforce instance. (cached)."""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
# all extras below (besides the version one) are explicitly defaulted to None
# because simple-salesforce has a built-in authentication-choosing method that
# relies on which arguments are None and without "or None" setting this connection
# in the UI will result in the blank extras being empty strings instead of None,
# which would break the connection if "get" was used on its own.
conn = Salesforce(
username=connection.login,
password=connection.password,
security_token=self._get_field(extras, "security_token") or None,
domain=self._get_field(extras, "domain") or None,
session_id=self.session_id,
instance=self._get_field(extras, "instance") or None,
instance_url=self._get_field(extras, "instance_url") or None,
organizationId=self._get_field(extras, "organization_id") or None,
version=self._get_field(extras, "version") or api.DEFAULT_API_VERSION,
proxies=self._get_field(extras, "proxies") or None,
session=self.session,
client_id=self._get_field(extras, "client_id") or None,
consumer_key=self._get_field(extras, "consumer_key") or None,
privatekey_file=self._get_field(extras, "private_key_file_path") or None,
privatekey=self._get_field(extras, "private_key") or None,
)
return conn
def get_conn(self) -> api.Salesforce:
"""Returns a Salesforce instance. (cached)."""
return self.conn
def make_query(self, query: str, include_deleted: bool = False, query_params: dict | None = None) -> dict:
"""
Make a query to Salesforce.
:param query: The query to make to Salesforce.
:param include_deleted: True if the query should include deleted records.
:param query_params: Additional optional arguments
:return: The query result.
"""
conn = self.get_conn()
self.log.info("Querying for all objects")
query_params = query_params or {}
query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)
self.log.info(
"Received results: Total size: %s; Done: %s", query_results["totalSize"], query_results["done"]
)
return query_results
def describe_object(self, obj: str) -> dict:
"""
Get the description of an object from Salesforce.
This description is the object's schema and
some extra metadata that Salesforce stores for each object.
:param obj: The name of the Salesforce object that we are getting a description of.
:return: the description of the Salesforce object.
"""
conn = self.get_conn()
return conn.__getattr__(obj).describe()
def get_available_fields(self, obj: str) -> list[str]:
"""
Get a list of all available fields for an object.
:param obj: The name of the Salesforce object that we are getting a description of.
:return: the names of the fields.
"""
obj_description = self.describe_object(obj)
return [field["name"] for field in obj_description["fields"]]
def get_object_from_salesforce(self, obj: str, fields: Iterable[str]) -> dict:
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
:param obj: The object name to get from Salesforce.
:param fields: The fields to get from the object.
:return: all instances of the object from Salesforce.
"""
query = f"SELECT {','.join(fields)} FROM {obj}"
self.log.info(
"Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]),
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, column: pd.Series) -> pd.Series:
"""
Convert a column of a dataframe to UNIX timestamps if applicable.
:param column: A Series object representing a column of a dataframe.
:return: a new series that maintains the same index as the original
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
column = pd.to_datetime(column)
except ValueError:
log.error("Could not convert field to timestamps: %s", column.name)
return column
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for value in column:
try:
converted.append(value.timestamp())
except (ValueError, AttributeError):
                # NaT cannot produce a timestamp; append a plain NaN (pd.np was removed in newer pandas)
                converted.append(float("nan"))
return pd.Series(converted, index=column.index)
def write_object_to_file(
self,
query_results: list[dict],
filename: str,
fmt: str = "csv",
coerce_to_timestamp: bool = False,
record_time_added: bool = False,
) -> pd.DataFrame:
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited instead of comma delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments.
:param query_results: the results from a SQL query
:param filename: the name of the file where the data should be dumped to
:param fmt: the format you want the output in. Default: 'csv'
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:return: the dataframe that gets written to the file.
"""
fmt = fmt.lower()
if fmt not in ["csv", "json", "ndjson"]:
raise ValueError(f"Format value is not recognized: {fmt}")
df = self.object_to_df(
query_results=query_results,
coerce_to_timestamp=coerce_to_timestamp,
record_time_added=record_time_added,
)
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = (
df[possible_strings]
.astype(str)
.apply(lambda x: x.str.replace("\r\n", "").str.replace("\n", ""))
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
def object_to_df(
self, query_results: list[dict], coerce_to_timestamp: bool = False, record_time_added: bool = False
) -> pd.DataFrame:
"""
Export query results to dataframe.
By default, this function will try and leave all values as they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments.
:param query_results: the results from a SQL query
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:return: the dataframe.
"""
# this line right here will convert all integers to floats
# if there are any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]["attributes"]["type"]
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
# possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
field["name"].lower()
for field in schema["fields"]
if field["type"] in ["date", "datetime"] and field["name"].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
return df
def test_connection(self):
"""Test the Salesforce connectivity."""
try:
self.describe_object("Account")
status = True
message = "Connection successfully tested"
except Exception as e:
status = False
message = str(e)
return status, message
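# --- Hedged usage sketch (not part of the original module) ---
# Combines the query and export helpers above; the connection id, the SOQL
# query and the output path are illustrative assumptions.
def _example_export_accounts_to_csv(path: str = "/tmp/accounts.csv") -> None:
    hook = SalesforceHook(salesforce_conn_id="salesforce_default")
    results = hook.make_query("SELECT Id, Name, CreatedDate FROM Account")
    hook.write_object_to_file(
        query_results=results["records"],
        filename=path,
        fmt="csv",
        coerce_to_timestamp=True,
        record_time_added=True,
    )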
| 18,008 | 43.248157 | 110 | py |
airflow | airflow-main/airflow/providers/salesforce/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/elasticsearch/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.0.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-elasticsearch:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,538 | 35.642857 | 122 | py |
airflow | airflow-main/airflow/providers/elasticsearch/hooks/elasticsearch.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from functools import cached_property
from typing import Any
from urllib import parse
from elasticsearch import Elasticsearch
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.hooks.base import BaseHook
from airflow.models.connection import Connection as AirflowConnection
from airflow.providers.common.sql.hooks.sql import DbApiHook
def connect(
host: str = "localhost",
port: int = 9200,
user: str | None = None,
password: str | None = None,
scheme: str = "http",
**kwargs: Any,
) -> ESConnection:
return ESConnection(host, port, user, password, scheme, **kwargs)
class ESConnection:
"""wrapper class for elasticsearch.Elasticsearch."""
def __init__(
self,
host: str = "localhost",
port: int = 9200,
user: str | None = None,
password: str | None = None,
scheme: str = "http",
**kwargs: Any,
):
self.host = host
self.port = port
self.user = user
self.password = password
self.scheme = scheme
self.kwargs = kwargs
netloc = f"{host}:{port}"
self.url = parse.urlunparse((scheme, netloc, "/", None, None, None))
if user and password:
self.es = Elasticsearch(self.url, http_auth=(user, password), **self.kwargs)
else:
self.es = Elasticsearch(self.url, **self.kwargs)
class ElasticsearchSQLHook(DbApiHook):
"""
Interact with Elasticsearch through the elasticsearch-dbapi.
This hook uses the Elasticsearch conn_id.
:param elasticsearch_conn_id: The :ref:`ElasticSearch connection id <howto/connection:elasticsearch>`
used for Elasticsearch credentials.
"""
conn_name_attr = "elasticsearch_conn_id"
default_conn_name = "elasticsearch_default"
conn_type = "elasticsearch"
hook_name = "Elasticsearch"
def __init__(self, schema: str = "http", connection: AirflowConnection | None = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.schema = schema
self.connection = connection
def get_conn(self) -> ESConnection:
"""Returns a elasticsearch connection object."""
conn_id = getattr(self, self.conn_name_attr)
conn = self.connection or self.get_connection(conn_id)
conn_args = dict(
host=conn.host,
port=conn.port,
user=conn.login or None,
password=conn.password or None,
scheme=conn.schema or "http",
)
if conn.extra_dejson.get("http_compress", False):
            # Pass through the configured value; bool(["http_compress"]) was always True.
            conn_args["http_compress"] = bool(conn.extra_dejson["http_compress"])
if conn.extra_dejson.get("timeout", False):
conn_args["timeout"] = conn.extra_dejson["timeout"]
conn = connect(**conn_args)
return conn
def get_uri(self) -> str:
conn_id = getattr(self, self.conn_name_attr)
conn = self.connection or self.get_connection(conn_id)
login = ""
if conn.login:
login = "{conn.login}:{conn.password}@".format(conn=conn)
host = conn.host
if conn.port is not None:
host += f":{conn.port}"
uri = "{conn.conn_type}+{conn.schema}://{login}{host}/".format(conn=conn, login=login, host=host)
extras_length = len(conn.extra_dejson)
if not extras_length:
return uri
uri += "?"
for arg_key, arg_value in conn.extra_dejson.items():
extras_length -= 1
uri += f"{arg_key}={arg_value}"
if extras_length:
uri += "&"
return uri
class ElasticsearchHook(ElasticsearchSQLHook):
"""
This class is deprecated and was renamed to ElasticsearchSQLHook.
Please use :class:`airflow.providers.elasticsearch.hooks.elasticsearch.ElasticsearchSQLHook`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.elasticsearch.hooks.elasticsearch.ElasticsearchSQLHook`.""",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
class ElasticsearchPythonHook(BaseHook):
"""
Interacts with Elasticsearch. This hook uses the official Elasticsearch Python Client.
:param hosts: list: A list of a single or many Elasticsearch instances. Example: ["http://localhost:9200"]
:param es_conn_args: dict: Additional arguments you might need to enter to connect to Elasticsearch.
Example: {"ca_cert":"/path/to/cert", "basic_auth": "(user, pass)"}
"""
def __init__(self, hosts: list[Any], es_conn_args: dict | None = None):
super().__init__()
self.hosts = hosts
self.es_conn_args = es_conn_args if es_conn_args else {}
def _get_elastic_connection(self):
"""Returns the Elasticsearch client."""
client = Elasticsearch(self.hosts, **self.es_conn_args)
return client
@cached_property
def get_conn(self):
"""Returns the Elasticsearch client (cached)."""
return self._get_elastic_connection()
def search(self, query: dict[Any, Any], index: str = "_all") -> dict:
"""
Returns results matching a query using Elasticsearch DSL.
:param index: str: The index you want to query
:param query: dict: The query you want to run
:returns: dict: The response 'hits' object from Elasticsearch
"""
es_client = self.get_conn
result = es_client.search(index=index, body=query)
return result["hits"]
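# --- Hedged usage sketch (not part of the original module) ---
# A simple match query through ElasticsearchPythonHook; the host URL, index
# pattern and query body are illustrative assumptions.
def _example_es_python_search() -> dict:
    hook = ElasticsearchPythonHook(hosts=["http://localhost:9200"])
    query = {"query": {"match": {"message": "airflow"}}}
    return hook.search(query=query, index="logs-*")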
| 6,481 | 32.241026 | 110 | py |
airflow | airflow-main/airflow/providers/elasticsearch/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/elasticsearch/log/es_response.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
def _wrap(val):
if isinstance(val, dict):
return AttributeDict(val)
return val
class AttributeList:
"""Helper class to provide attribute like access to List objects."""
def __init__(self, _list):
if not isinstance(_list, list):
_list = list(_list)
self._l_ = _list
def __getitem__(self, k):
val = self._l_[k]
if isinstance(val, slice):
return AttributeList(val)
return _wrap(val)
def __iter__(self):
return map(lambda i: _wrap(i), self._l_)
def __bool__(self):
return bool(self._l_)
class AttributeDict:
"""Helper class to provide attribute like access to Dictionary objects."""
def __init__(self, d):
super().__setattr__("_d_", d)
def __getattr__(self, attr_name):
try:
return self.__getitem__(attr_name)
except KeyError:
raise AttributeError(f"{self.__class__.__name__!r} object has no attribute {attr_name!r}")
def __getitem__(self, key):
return _wrap(self._d_[key])
def to_dict(self):
return self._d_
class Hit(AttributeDict):
"""
The Hit class is used to manage and access elements in a document.
It inherits from the AttributeDict class and provides
attribute-like access to its elements, similar to a dictionary.
"""
def __init__(self, document):
data = {}
if "_source" in document:
data = document["_source"]
if "fields" in document:
data.update(document["fields"])
super().__init__(data)
super().__setattr__("meta", HitMeta(document))
class HitMeta(AttributeDict):
"""
The HitMeta class is used to manage and access metadata of a document.
This class inherits from the AttributeDict class and provides
attribute-like access to its elements.
"""
def __init__(self, document, exclude=("_source", "_fields")):
d = {k[1:] if k.startswith("_") else k: v for (k, v) in document.items() if k not in exclude}
if "type" in d:
# make sure we are consistent everywhere in python
d["doc_type"] = d.pop("type")
super().__init__(d)
class ElasticSearchResponse(AttributeDict):
"""
The ElasticSearchResponse class is used to manage and access the response from an Elasticsearch search.
This class can be iterated over directly to access hits in the response. Indexing the class instance
with an integer or slice will also access the hits. The class also evaluates to True
if there are any hits in the response.
The hits property returns an AttributeList of hits in the response, with each hit transformed into
an instance of the doc_class if provided.
The response parameter stores the dictionary returned by the Elasticsearch client search method.
"""
def __init__(self, search, response, doc_class=None):
super().__setattr__("_search", search)
super().__setattr__("_doc_class", doc_class)
super().__init__(response)
def __iter__(self):
return iter(self.hits)
def __getitem__(self, key):
if isinstance(key, (slice, int)):
return self.hits[key]
return super().__getitem__(key)
def __bool__(self):
return bool(self.hits)
@property
def hits(self):
"""
This property provides access to the hits (i.e., the results) of the Elasticsearch response.
The hits are represented as an `AttributeList` of `Hit` instances, which allow for easy,
attribute-like access to the hit data.
The hits are lazily loaded, meaning they're not processed until this property is accessed.
Upon first access, the hits data from the response is processed using the `_get_result` method
of the associated `Search` instance (i.e. an instance from ElasticsearchTaskHandler class),
and the results are stored for future accesses.
Each hit also includes all the additional data present in the "hits" field of the response,
accessible as attributes of the hit.
"""
if not hasattr(self, "_hits"):
h = self._d_["hits"]
try:
hits = AttributeList(map(self._search._get_result, h["hits"]))
except AttributeError as e:
raise TypeError("Could not parse hits.", e)
super().__setattr__("_hits", hits)
for k in h:
setattr(self._hits, k, _wrap(h[k]))
return self._hits
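# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the attribute-style access these wrappers provide over a raw
# Elasticsearch hit dictionary; the document contents are made up.
def _example_hit_access() -> str:
    raw_hit = {
        "_index": "filebeat-000001",
        "_id": "abc123",
        "_source": {"message": "task started", "log_id": "dag-task-run--1-1"},
    }
    hit = Hit(raw_hit)
    # Fields under "_source" are reachable as attributes; metadata via hit.meta.
    return f"{hit.meta.index}/{hit.meta.id}: {hit.message}"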
| 5,373 | 33.012658 | 107 | py |
airflow | airflow-main/airflow/providers/elasticsearch/log/es_json_formatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import pendulum
from airflow.utils.log.json_formatter import JSONFormatter
class ElasticsearchJSONFormatter(JSONFormatter):
"""Convert a log record to JSON with ISO 8601 date and time format."""
default_time_format = "%Y-%m-%dT%H:%M:%S"
default_msec_format = "%s.%03d"
default_tz_format = "%z"
def formatTime(self, record, datefmt=None):
"""Return the creation time of the LogRecord in ISO 8601 date/time format in the local time zone."""
dt = pendulum.from_timestamp(record.created, tz=pendulum.local_timezone())
if datefmt:
s = dt.strftime(datefmt)
else:
s = dt.strftime(self.default_time_format)
if self.default_msec_format:
s = self.default_msec_format % (s, record.msecs)
if self.default_tz_format:
s += dt.strftime(self.default_tz_format)
return s
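# --- Hedged usage sketch (not part of the original module) ---
# Shows the JSON output this formatter produces for a plain log record; the
# choice of fields here is an illustrative assumption.
def _example_json_formatter_output() -> str:
    import logging
    formatter = ElasticsearchJSONFormatter(
        fmt="%(asctime)s %(message)s",
        json_fields=["asctime", "levelname", "message"],
    )
    record = logging.makeLogRecord({"msg": "task started", "levelname": "INFO"})
    return formatter.format(record)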
| 1,705 | 37.772727 | 108 | py |
airflow | airflow-main/airflow/providers/elasticsearch/log/es_task_handler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import sys
import warnings
from collections import defaultdict
from datetime import datetime
from operator import attrgetter
from time import time
from typing import TYPE_CHECKING, Any, Callable, List, Tuple
from urllib.parse import quote
# Using `from elasticsearch import *` would break elasticsearch mocking used in unit test.
import elasticsearch
import pendulum
from elasticsearch.exceptions import ElasticsearchException, NotFoundError
from airflow.configuration import conf
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.providers.elasticsearch.log.es_json_formatter import ElasticsearchJSONFormatter
from airflow.providers.elasticsearch.log.es_response import ElasticSearchResponse, Hit
from airflow.utils import timezone
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import ExternalLoggingMixin, LoggingMixin
from airflow.utils.session import create_session
LOG_LINE_DEFAULTS = {"exc_text": "", "stack_info": ""}
# Elasticsearch hosted log type
EsLogMsgType = List[Tuple[str, str]]
# Compatibility: Airflow 2.3.3 and up uses this method, which accesses the
# LogTemplate model to record the log ID template used. If this function does
# not exist, the task handler should use the log_id_template attribute instead.
USE_PER_RUN_LOG_ID = hasattr(DagRun, "get_log_template")
class ElasticsearchTaskHandler(FileTaskHandler, ExternalLoggingMixin, LoggingMixin):
"""
ElasticsearchTaskHandler is a python log handler that reads logs from Elasticsearch.
Note that Airflow does not handle the indexing of logs into Elasticsearch. Instead,
Airflow flushes logs into local files. Additional software setup is required to index
the logs into Elasticsearch, such as using Filebeat and Logstash.
To efficiently query and sort Elasticsearch results, this handler assumes each
    log message has a field `log_id` that consists of the task instance's primary keys:
    `log_id = {dag_id}-{task_id}-{execution_date}-{try_number}`
    Log messages with a specific log_id are sorted based on `offset`,
    which is a unique integer that indicates the log message's order.
Timestamps here are unreliable because multiple log messages
might have the same timestamp.
:param base_log_folder: base folder to store logs locally
:param log_id_template: log id template
:param host: Elasticsearch host name
"""
PAGE = 0
MAX_LINE_PER_PAGE = 1000
LOG_NAME = "Elasticsearch"
trigger_should_wrap = True
def __init__(
self,
base_log_folder: str,
end_of_log_mark: str,
write_stdout: bool,
json_format: bool,
json_fields: str,
host_field: str = "host",
offset_field: str = "offset",
host: str = "localhost:9200",
frontend: str = "localhost:5601",
index_patterns: str | None = conf.get("elasticsearch", "index_patterns", fallback="_all"),
es_kwargs: dict | None = conf.getsection("elasticsearch_configs"),
*,
filename_template: str | None = None,
log_id_template: str | None = None,
):
es_kwargs = es_kwargs or {}
super().__init__(base_log_folder, filename_template)
self.closed = False
self.client = elasticsearch.Elasticsearch(host.split(";"), **es_kwargs) # type: ignore[attr-defined]
if USE_PER_RUN_LOG_ID and log_id_template is not None:
warnings.warn(
"Passing log_id_template to ElasticsearchTaskHandler is deprecated and has no effect",
AirflowProviderDeprecationWarning,
)
self.log_id_template = log_id_template # Only used on Airflow < 2.3.2.
self.frontend = frontend
self.mark_end_on_close = True
self.end_of_log_mark = end_of_log_mark.strip()
self.write_stdout = write_stdout
self.json_format = json_format
self.json_fields = [label.strip() for label in json_fields.split(",")]
self.host_field = host_field
self.offset_field = offset_field
self.index_patterns = index_patterns
self.context_set = False
self.formatter: logging.Formatter
self.handler: logging.FileHandler | logging.StreamHandler # type: ignore[assignment]
self._doc_type_map: dict[Any, Any] = {}
self._doc_type: list[Any] = []
def _render_log_id(self, ti: TaskInstance, try_number: int) -> str:
with create_session() as session:
dag_run = ti.get_dagrun(session=session)
if USE_PER_RUN_LOG_ID:
log_id_template = dag_run.get_log_template(session=session).elasticsearch_id
else:
log_id_template = self.log_id_template
try:
dag = ti.task.dag
except AttributeError: # ti.task is not always set.
data_interval = (dag_run.data_interval_start, dag_run.data_interval_end)
else:
if TYPE_CHECKING:
assert dag is not None
data_interval = dag.get_run_data_interval(dag_run)
if self.json_format:
data_interval_start = self._clean_date(data_interval[0])
data_interval_end = self._clean_date(data_interval[1])
execution_date = self._clean_date(dag_run.execution_date)
else:
if data_interval[0]:
data_interval_start = data_interval[0].isoformat()
else:
data_interval_start = ""
if data_interval[1]:
data_interval_end = data_interval[1].isoformat()
else:
data_interval_end = ""
execution_date = dag_run.execution_date.isoformat()
return log_id_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=getattr(ti, "run_id", ""),
data_interval_start=data_interval_start,
data_interval_end=data_interval_end,
execution_date=execution_date,
try_number=try_number,
map_index=getattr(ti, "map_index", ""),
)
@staticmethod
def _clean_date(value: datetime | None) -> str:
"""
Clean up a date value so that it is safe to query in elasticsearch by removing reserved characters.
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
"""
if value is None:
return ""
return value.strftime("%Y_%m_%dT%H_%M_%S_%f")
def _group_logs_by_host(self, logs):
grouped_logs = defaultdict(list)
for log in logs:
key = getattr_nested(log, self.host_field, None) or "default_host"
grouped_logs[key].append(log)
return grouped_logs
def _read_grouped_logs(self):
return True
def _read(
self, ti: TaskInstance, try_number: int, metadata: dict | None = None
) -> tuple[EsLogMsgType, dict]:
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: a list of tuple with host and log documents, metadata.
"""
if not metadata:
metadata = {"offset": 0}
if "offset" not in metadata:
metadata["offset"] = 0
offset = metadata["offset"]
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset, metadata)
logs_by_host = self._group_logs_by_host(logs)
next_offset = offset if not logs else attrgetter(self.offset_field)(logs[-1])
# Ensure a string here. Large offset numbers will get JSON.parsed incorrectly
# on the client. Sending as a string prevents this issue.
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
metadata["offset"] = str(next_offset)
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata["end_of_log"] = False
for logs in logs_by_host.values():
if logs[-1].message == self.end_of_log_mark:
metadata["end_of_log"] = True
cur_ts = pendulum.now()
if "last_log_timestamp" in metadata:
last_log_ts = timezone.parse(metadata["last_log_timestamp"])
# if we are not getting any logs at all after more than N seconds of trying,
# assume logs do not exist
if int(next_offset) == 0 and cur_ts.diff(last_log_ts).in_seconds() > 5:
metadata["end_of_log"] = True
missing_log_message = (
f"*** Log {log_id} not found in Elasticsearch. "
"If your task started recently, please wait a moment and reload this page. "
"Otherwise, the logs for this task instance may have been removed."
)
return [("", missing_log_message)], metadata
if (
# Assume end of log after not receiving new log for N min,
cur_ts.diff(last_log_ts).in_minutes() >= 5
# if max_offset specified, respect it
or ("max_offset" in metadata and int(offset) >= int(metadata["max_offset"]))
):
metadata["end_of_log"] = True
if int(offset) != int(next_offset) or "last_log_timestamp" not in metadata:
metadata["last_log_timestamp"] = str(cur_ts)
# If we hit the end of the log, remove the actual end_of_log message
# to prevent it from showing in the UI.
def concat_logs(lines):
log_range = (len(lines) - 1) if lines[-1].message == self.end_of_log_mark else len(lines)
return "\n".join(self._format_msg(lines[i]) for i in range(log_range))
message = [(host, concat_logs(hosted_log)) for host, hosted_log in logs_by_host.items()]
return message, metadata
def _format_msg(self, log_line):
"""Format ES Record to match settings.LOG_FORMAT when used with json_format."""
# Using formatter._style.format makes it future proof i.e.
# if we change the formatter style from '%' to '{' or '$', this will still work
if self.json_format:
try:
return self.formatter._style.format(
logging.makeLogRecord({**LOG_LINE_DEFAULTS, **log_line.to_dict()})
)
except Exception:
pass
# Just a safe-guard to preserve backwards-compatibility
return log_line.message
def es_read(self, log_id: str, offset: int | str, metadata: dict) -> list | ElasticSearchResponse:
"""
Return the logs matching log_id in Elasticsearch and next offset or ''.
:param log_id: the log_id of the log to read.
:param offset: the offset start to read log from.
        :param metadata: log metadata, used for streaming log download.
"""
query: dict[Any, Any] = {
"query": {
"bool": {
"filter": [{"range": {self.offset_field: {"gt": int(offset)}}}],
"must": [{"match_phrase": {"log_id": log_id}}],
}
}
}
try:
max_log_line = self.client.count(index=self.index_patterns, body=query)["count"]
except NotFoundError as e:
self.log.exception("The target index pattern %s does not exist", self.index_patterns)
raise e
except ElasticsearchException as e:
self.log.exception("Could not get current log size with log_id: %s", log_id)
raise e
logs: list[Any] | ElasticSearchResponse = []
if max_log_line != 0:
try:
query.update({"sort": [self.offset_field]})
res = self.client.search(
index=self.index_patterns,
body=query,
size=self.MAX_LINE_PER_PAGE,
from_=self.MAX_LINE_PER_PAGE * self.PAGE,
)
logs = ElasticSearchResponse(self, res)
except elasticsearch.exceptions.ElasticsearchException:
self.log.exception("Could not read log with log_id: %s", log_id)
return logs
def emit(self, record):
if self.handler:
setattr(record, self.offset_field, int(time() * (10**9)))
self.handler.emit(record)
def set_context(self, ti: TaskInstance) -> None:
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
"""
is_trigger_log_context = getattr(ti, "is_trigger_log_context", None)
is_ti_raw = getattr(ti, "raw", None)
self.mark_end_on_close = not is_ti_raw and not is_trigger_log_context
if self.json_format:
self.formatter = ElasticsearchJSONFormatter(
fmt=self.formatter._fmt,
json_fields=self.json_fields + [self.offset_field],
extras={
"dag_id": str(ti.dag_id),
"task_id": str(ti.task_id),
"execution_date": self._clean_date(ti.execution_date),
"try_number": str(ti.try_number),
"log_id": self._render_log_id(ti, ti.try_number),
},
)
if self.write_stdout:
if self.context_set:
# We don't want to re-set up the handler if this logger has
# already been initialized
return
self.handler = logging.StreamHandler(stream=sys.__stdout__)
self.handler.setLevel(self.level)
self.handler.setFormatter(self.formatter)
else:
super().set_context(ti)
self.context_set = True
def close(self) -> None:
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
# todo: remove `getattr` when min airflow version >= 2.6
if not self.mark_end_on_close or getattr(self, "ctx_task_deferred", None):
# when we're closing due to task deferral, don't mark end of log
self.closed = True
return
# Case which context of the handler was not set.
if self.handler is None:
self.closed = True
return
# Reopen the file stream, because FileHandler.close() would be called
# first in logging.shutdown() and the stream in it would be set to None.
if self.handler.stream is None or self.handler.stream.closed: # type: ignore[attr-defined]
self.handler.stream = self.handler._open() # type: ignore[union-attr]
# Mark the end of file using end of log mark,
# so we know where to stop while auto-tailing.
self.emit(logging.makeLogRecord({"msg": self.end_of_log_mark}))
if self.write_stdout:
self.handler.close()
sys.stdout = sys.__stdout__
super().close()
self.closed = True
@property
def log_name(self) -> str:
"""The log name."""
return self.LOG_NAME
def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str:
"""
Creates an address for an external log collecting service.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from.
:return: URL to the external log collection service
"""
log_id = self._render_log_id(task_instance, try_number)
scheme = "" if "://" in self.frontend else "https://"
return scheme + self.frontend.format(log_id=quote(log_id))
@property
def supports_external_link(self) -> bool:
"""Whether we can support external links."""
return bool(self.frontend)
def _resolve_nested(self, hit: dict[Any, Any], parent_class=None) -> type[Hit]:
"""
Resolves nested hits from Elasticsearch by iteratively navigating the `_nested` field.
The result is used to fetch the appropriate document class to handle the hit.
This method can be used with nested Elasticsearch fields which are structured
as dictionaries with "field" and "_nested" keys.
"""
doc_class = Hit
nested_path: list[str] = []
nesting = hit["_nested"]
while nesting and "field" in nesting:
nested_path.append(nesting["field"])
nesting = nesting.get("_nested")
nested_path_str = ".".join(nested_path)
if hasattr(parent_class, "_index"):
nested_field = parent_class._index.resolve_field(nested_path_str)
if nested_field is not None:
return nested_field._doc_class
return doc_class
def _get_result(self, hit: dict[Any, Any], parent_class=None) -> Hit:
"""
This method processes a hit (i.e., a result) from an Elasticsearch response and transforms it into an
appropriate class instance.
The transformation depends on the contents of the hit. If the document in hit contains a nested field,
the '_resolve_nested' method is used to determine the appropriate class (based on the nested path).
If the hit has a document type that is present in the '_doc_type_map', the corresponding class is
used. If not, the method iterates over the '_doc_type' classes and uses the first one whose '_matches'
method returns True for the hit.
If the hit contains any 'inner_hits', these are also processed into 'ElasticSearchResponse' instances
using the determined class.
Finally, the transformed hit is returned. If the determined class has a 'from_es' method, this is
        used to transform the hit.
An example of the hit argument:
{'_id': 'jdeZT4kBjAZqZnexVUxk',
'_index': '.ds-filebeat-8.8.2-2023.07.09-000001',
'_score': 2.482621,
'_source': {'@timestamp': '2023-07-13T14:13:15.140Z',
'asctime': '2023-07-09T07:47:43.907+0000',
'container': {'id': 'airflow'},
'dag_id': 'example_bash_operator',
'ecs': {'version': '8.0.0'},
'execution_date': '2023_07_09T07_47_32_000000',
'filename': 'taskinstance.py',
'input': {'type': 'log'},
'levelname': 'INFO',
'lineno': 1144,
'log': {'file': {'path': "/opt/airflow/Documents/GitHub/airflow/logs/
dag_id=example_bash_operator'/run_id=owen_run_run/
task_id=run_after_loop/attempt=1.log"},
'offset': 0},
'log.offset': 1688888863907337472,
'log_id': 'example_bash_operator-run_after_loop-owen_run_run--1-1',
'message': 'Dependencies all met for dep_context=non-requeueable '
'deps ti=<TaskInstance: '
'example_bash_operator.run_after_loop owen_run_run '
'[queued]>',
'task_id': 'run_after_loop',
'try_number': '1'},
'_type': '_doc'}
"""
doc_class = Hit
dt = hit.get("_type")
if "_nested" in hit:
doc_class = self._resolve_nested(hit, parent_class)
elif dt in self._doc_type_map:
doc_class = self._doc_type_map[dt]
else:
for doc_type in self._doc_type:
if hasattr(doc_type, "_matches") and doc_type._matches(hit):
doc_class = doc_type
break
for t in hit.get("inner_hits", ()):
hit["inner_hits"][t] = ElasticSearchResponse(self, hit["inner_hits"][t], doc_class=doc_class)
# callback should get the Hit class if "from_es" is not defined
callback: type[Hit] | Callable[..., Any] = getattr(doc_class, "from_es", doc_class)
return callback(hit)
def getattr_nested(obj, item, default):
"""
Get item from obj but return default if not found.
E.g. calling ``getattr_nested(a, 'b.c', "NA")`` will return
``a.b.c`` if such a value exists, and "NA" otherwise.
:meta private:
"""
try:
return attrgetter(item)(obj)
except AttributeError:
return default
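# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the nested attribute lookup helper above with a tiny stand-in
# object; the classes and values are made up for demonstration.
def _example_getattr_nested() -> tuple:
    class _Host:
        name = "worker-1"
    class _Record:
        host = _Host()
    record = _Record()
    found = getattr_nested(record, "host.name", "default_host")  # -> "worker-1"
    missing = getattr_nested(record, "host.ip", "default_host")  # -> "default_host"
    return found, missing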
| 21,810 | 40.544762 | 126 | py |
airflow | airflow-main/airflow/providers/elasticsearch/log/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/asana/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "2.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-asana:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,530 | 35.452381 | 114 | py |
airflow | airflow-main/airflow/providers/asana/operators/asana_tasks.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.asana.hooks.asana import AsanaHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AsanaCreateTaskOperator(BaseOperator):
"""
This operator can be used to create Asana tasks.
.. seealso::
For more information on Asana optional task parameters:
https://developers.asana.com/docs/create-a-task
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AsanaCreateTaskOperator`
:param conn_id: The Asana connection to use.
:param name: Name of the Asana task.
:param task_parameters: Any of the optional task creation parameters.
See https://developers.asana.com/docs/create-a-task for a complete list.
You must specify at least one of 'workspace', 'parent', or 'projects'
either here or in the connection.
"""
def __init__(
self,
*,
conn_id: str,
name: str,
task_parameters: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.name = name
self.task_parameters = task_parameters
def execute(self, context: Context) -> str:
hook = AsanaHook(conn_id=self.conn_id)
response = hook.create_task(self.name, self.task_parameters)
self.log.info(response)
return response["gid"]
class AsanaUpdateTaskOperator(BaseOperator):
"""
This operator can be used to update Asana tasks.
.. seealso::
For more information on Asana optional task parameters:
https://developers.asana.com/docs/update-a-task
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AsanaUpdateTaskOperator`
:param conn_id: The Asana connection to use.
:param asana_task_gid: Asana task ID to update
:param task_parameters: Any task parameters that should be updated.
See https://developers.asana.com/docs/update-a-task for a complete list.
"""
def __init__(
self,
*,
conn_id: str,
asana_task_gid: str,
task_parameters: dict,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.asana_task_gid = asana_task_gid
self.task_parameters = task_parameters
def execute(self, context: Context) -> None:
hook = AsanaHook(conn_id=self.conn_id)
response = hook.update_task(self.asana_task_gid, self.task_parameters)
self.log.info(response)
class AsanaDeleteTaskOperator(BaseOperator):
"""
This operator can be used to delete Asana tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AsanaDeleteTaskOperator`
:param conn_id: The Asana connection to use.
:param asana_task_gid: Asana Task ID to delete.
"""
def __init__(
self,
*,
conn_id: str,
asana_task_gid: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.asana_task_gid = asana_task_gid
def execute(self, context: Context) -> None:
hook = AsanaHook(conn_id=self.conn_id)
response = hook.delete_task(self.asana_task_gid)
self.log.info(response)
class AsanaFindTaskOperator(BaseOperator):
"""
This operator can be used to retrieve Asana tasks that match various filters.
.. seealso::
For a list of possible filters:
        https://developers.asana.com/docs/get-multiple-tasks
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AsanaFindTaskOperator`
:param conn_id: The Asana connection to use.
:param search_parameters: The parameters used to find relevant tasks. You must
specify at least one of `project`, `section`, `tag`, `user_task_list`, or both
`assignee` and `workspace` either here or in the connection.
"""
def __init__(
self,
*,
conn_id: str,
search_parameters: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.search_parameters = search_parameters
def execute(self, context: Context) -> list:
hook = AsanaHook(conn_id=self.conn_id)
response = hook.find_task(self.search_parameters)
self.log.info(response)
return response
| 5,491 | 30.745665 | 86 | py |
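A minimal usage sketch for the four operators defined in asana_tasks.py above (not part of the source file). It assumes an Airflow connection named "asana_default" whose password is a personal access token and whose extras set a default project or workspace; the dag_id, dates, notes, and task gids below are illustrative placeholders.

from datetime import datetime
from airflow import DAG
from airflow.providers.asana.operators.asana_tasks import (
    AsanaCreateTaskOperator,
    AsanaDeleteTaskOperator,
    AsanaFindTaskOperator,
    AsanaUpdateTaskOperator,
)

with DAG(dag_id="asana_example", start_date=datetime(2023, 1, 1), schedule=None) as dag:
    create = AsanaCreateTaskOperator(
        task_id="create_task",
        conn_id="asana_default",
        name="Triage new bug reports",
        task_parameters={"notes": "Created from Airflow"},  # optional task attributes
    )
    find = AsanaFindTaskOperator(
        task_id="find_tasks",
        conn_id="asana_default",
        search_parameters={"completed_since": "now"},  # default project comes from the connection
    )
    update = AsanaUpdateTaskOperator(
        task_id="update_task",
        conn_id="asana_default",
        asana_task_gid="1234567890",  # placeholder gid; in practice pull the gid create_task pushed to XCom
        task_parameters={"completed": True},
    )
    delete = AsanaDeleteTaskOperator(
        task_id="delete_task",
        conn_id="asana_default",
        asana_task_gid="1234567890",  # placeholder gid
    )
    create >> find >> update >> delete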
airflow | airflow-main/airflow/providers/asana/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/asana/hooks/asana.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Connect to Asana."""
from __future__ import annotations
from functools import cached_property, wraps
from typing import Any
from asana import Client # type: ignore[attr-defined]
from asana.error import NotFoundError # type: ignore[attr-defined]
from airflow.hooks.base import BaseHook
def _ensure_prefixes(conn_type):
"""Remove when provider min airflow version >= 2.5.0 since this is now handled by provider manager."""
def dec(func):
@wraps(func)
def inner():
field_behaviors = func()
conn_attrs = {"host", "schema", "login", "password", "port", "extra"}
def _ensure_prefix(field):
if field not in conn_attrs and not field.startswith("extra__"):
return f"extra__{conn_type}__{field}"
else:
return field
if "placeholders" in field_behaviors:
placeholders = field_behaviors["placeholders"]
field_behaviors["placeholders"] = {_ensure_prefix(k): v for k, v in placeholders.items()}
return field_behaviors
return inner
return dec
class AsanaHook(BaseHook):
"""Wrapper around Asana Python client library."""
conn_name_attr = "asana_conn_id"
default_conn_name = "asana_default"
conn_type = "asana"
hook_name = "Asana"
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.connection = self.get_connection(conn_id)
extras = self.connection.extra_dejson
self.workspace = self._get_field(extras, "workspace") or None
self.project = self._get_field(extras, "project") or None
def _get_field(self, extras: dict, field_name: str):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__asana__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
return extras[field_name] or None
prefixed_name = f"{backcompat_prefix}{field_name}"
return extras.get(prefixed_name) or None
def get_conn(self) -> Client:
return self.client
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"workspace": StringField(lazy_gettext("Workspace"), widget=BS3TextFieldWidget()),
"project": StringField(lazy_gettext("Project"), widget=BS3TextFieldWidget()),
}
@staticmethod
@_ensure_prefixes(conn_type="asana")
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["port", "host", "login", "schema"],
"relabeling": {},
"placeholders": {
"password": "Asana personal access token",
"workspace": "Asana workspace gid",
"project": "Asana project gid",
},
}
@cached_property
def client(self) -> Client:
"""Instantiates python-asana Client."""
if not self.connection.password:
raise ValueError(
"Asana connection password must contain a personal access token: "
"https://developers.asana.com/docs/personal-access-token"
)
return Client.access_token(self.connection.password)
def create_task(self, task_name: str, params: dict | None) -> dict:
"""
Creates an Asana task.
:param task_name: Name of the new task
:param params: Other task attributes, such as due_on, parent, and notes. For a complete list
of possible parameters, see https://developers.asana.com/docs/create-a-task
:return: A dict of attributes of the created task, including its gid
"""
merged_params = self._merge_create_task_parameters(task_name, params)
self._validate_create_task_parameters(merged_params)
response = self.client.tasks.create(params=merged_params)
return response
def _merge_create_task_parameters(self, task_name: str, task_params: dict | None) -> dict:
"""
Merge create_task parameters with default params from the connection.
:param task_name: Name of the task
:param task_params: Other task parameters which should override defaults from the connection
:return: A dict of merged parameters to use in the new task
"""
merged_params: dict[str, Any] = {"name": task_name}
if self.project:
merged_params["projects"] = [self.project]
# Only use default workspace if user did not provide a project id
elif self.workspace and not (task_params and ("projects" in task_params)):
merged_params["workspace"] = self.workspace
if task_params:
merged_params.update(task_params)
return merged_params
@staticmethod
def _validate_create_task_parameters(params: dict) -> None:
"""
Check that user provided minimal parameters for task creation.
:param params: A dict of attributes the task to be created should have
:return: None; raises ValueError if `params` doesn't contain required parameters
"""
required_parameters = {"workspace", "projects", "parent"}
if required_parameters.isdisjoint(params):
raise ValueError(
f"You must specify at least one of {required_parameters} in the create_task parameters"
)
def delete_task(self, task_id: str) -> dict:
"""
Deletes an Asana task.
:param task_id: Asana GID of the task to delete
:return: A dict containing the response from Asana
"""
try:
response = self.client.tasks.delete_task(task_id)
return response
except NotFoundError:
self.log.info("Asana task %s not found for deletion.", task_id)
return {}
def find_task(self, params: dict | None) -> list:
"""
Retrieves a list of Asana tasks that match search parameters.
:param params: Attributes that matching tasks should have. For a list of possible parameters,
see https://developers.asana.com/docs/get-multiple-tasks
:return: A list of dicts containing attributes of matching Asana tasks
"""
merged_params = self._merge_find_task_parameters(params)
self._validate_find_task_parameters(merged_params)
response = self.client.tasks.find_all(params=merged_params)
return list(response)
def _merge_find_task_parameters(self, search_parameters: dict | None) -> dict:
"""
Merge find_task parameters with default params from the connection.
:param search_parameters: Attributes that tasks matching the search should have; these override
defaults from the connection
:return: A dict of merged parameters to use in the search
"""
merged_params = {}
if self.project:
merged_params["project"] = self.project
# Only use default workspace if user did not provide a project id
elif self.workspace and not (search_parameters and ("project" in search_parameters)):
merged_params["workspace"] = self.workspace
if search_parameters:
merged_params.update(search_parameters)
return merged_params
@staticmethod
def _validate_find_task_parameters(params: dict) -> None:
"""
Check that the user provided minimal search parameters.
:param params: Dict of parameters to be used in the search
:return: None; raises ValueError if search parameters do not contain minimum required attributes
"""
one_of_list = {"project", "section", "tag", "user_task_list"}
both_of_list = {"assignee", "workspace"}
contains_both = both_of_list.issubset(params)
contains_one = not one_of_list.isdisjoint(params)
if not (contains_both or contains_one):
raise ValueError(
f"You must specify at least one of {one_of_list} "
f"or both of {both_of_list} in the find_task parameters."
)
def update_task(self, task_id: str, params: dict) -> dict:
"""
Updates an existing Asana task.
:param task_id: Asana GID of task to update
:param params: New values of the task's attributes. For a list of possible parameters, see
https://developers.asana.com/docs/update-a-task
:return: A dict containing the updated task's attributes
"""
response = self.client.tasks.update(task_id, params)
return response
def create_project(self, params: dict) -> dict:
"""
Creates a new project.
:param params: Attributes that the new project should have. See
https://developers.asana.com/docs/create-a-project#create-a-project-parameters
for a list of possible parameters.
:return: A dict containing the new project's attributes, including its GID.
"""
merged_params = self._merge_project_parameters(params)
self._validate_create_project_parameters(merged_params)
response = self.client.projects.create(merged_params)
return response
@staticmethod
def _validate_create_project_parameters(params: dict) -> None:
"""
Check that user provided the minimum required parameters for project creation.
:param params: Attributes that the new project should have
:return: None; raises a ValueError if `params` does not contain the minimum required attributes.
"""
required_parameters = {"workspace", "team"}
if required_parameters.isdisjoint(params):
raise ValueError(
f"You must specify at least one of {required_parameters} in the create_project params"
)
def _merge_project_parameters(self, params: dict) -> dict:
"""
Merge parameters passed into a project method with default params from the connection.
:param params: Parameters passed into one of the project methods, which should override
defaults from the connection
:return: A dict of merged parameters
"""
merged_params = {} if self.workspace is None else {"workspace": self.workspace}
merged_params.update(params)
return merged_params
def find_project(self, params: dict) -> list:
"""
Retrieves a list of Asana projects that match search parameters.
:param params: Attributes which matching projects should have. See
https://developers.asana.com/docs/get-multiple-projects
for a list of possible parameters.
:return: A list of dicts containing attributes of matching Asana projects
"""
merged_params = self._merge_project_parameters(params)
response = self.client.projects.find_all(merged_params)
return list(response)
def update_project(self, project_id: str, params: dict) -> dict:
"""
Updates an existing project.
:param project_id: Asana GID of the project to update
:param params: New attributes that the project should have. See
https://developers.asana.com/docs/update-a-project#update-a-project-parameters
for a list of possible parameters
:return: A dict containing the updated project's attributes
"""
response = self.client.projects.update(project_id, params)
return response
def delete_project(self, project_id: str) -> dict:
"""
Deletes a project.
:param project_id: Asana GID of the project to delete
:return: A dict containing the response from Asana
"""
try:
response = self.client.projects.delete(project_id)
return response
except NotFoundError:
self.log.info("Asana project %s not found for deletion.", project_id)
return {}
| 13,309 | 40.20743 | 110 | py |
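The hook above can also be driven directly, outside the operators, for example from a script or a @task-decorated function. A minimal sketch follows (not part of the source file), assuming a connection "asana_default" whose password holds a personal access token and whose extras set a default workspace/project so the validation helpers are satisfied; all gids and parameter values are illustrative.

from airflow.providers.asana.hooks.asana import AsanaHook

hook = AsanaHook(conn_id="asana_default")

# create_task merges connection defaults into the parameters, validates that at least
# one of workspace/projects/parent is present, then calls the Asana API
new_task = hook.create_task("Review release notes", {"notes": "created from a script"})
print(new_task["gid"])

# find_task needs project/section/tag/user_task_list, or assignee plus workspace;
# here the default project configured on the connection satisfies that check
open_tasks = hook.find_task({"completed_since": "now"})
print(len(open_tasks))

hook.update_task(new_task["gid"], {"completed": True})
hook.delete_task(new_task["gid"])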
airflow | airflow-main/airflow/providers/asana/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/cli/simple_table.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import inspect
import json
from typing import Any, Callable, Sequence
from rich.box import ASCII_DOUBLE_HEAD
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
from tabulate import tabulate
from airflow.plugins_manager import PluginsDirectorySource
from airflow.typing_compat import TypeGuard
from airflow.utils import yaml
from airflow.utils.platform import is_tty
def is_data_sequence(data: Sequence[dict | Any]) -> TypeGuard[Sequence[dict]]:
return all(isinstance(d, dict) for d in data)
class AirflowConsole(Console):
"""Airflow rich console."""
def __init__(self, show_header: bool = True, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Use a constant width when not attached to a TTY so the whole console output can be piped
        self._width = 200 if not is_tty() else self._width
        # Whether to show headers in tables
        self.show_header = show_header
def print_as_json(self, data: dict):
"""Renders dict as json text representation."""
json_content = json.dumps(data)
self.print(Syntax(json_content, "json", theme="ansi_dark"), soft_wrap=True)
def print_as_yaml(self, data: dict):
"""Renders dict as yaml text representation."""
yaml_content = yaml.dump(data)
self.print(Syntax(yaml_content, "yaml", theme="ansi_dark"), soft_wrap=True)
def print_as_table(self, data: list[dict]):
"""Renders list of dictionaries as table."""
if not data:
self.print("No data found")
return
table = SimpleTable(show_header=self.show_header)
for col in data[0].keys():
table.add_column(col)
for row in data:
table.add_row(*(str(d) for d in row.values()))
self.print(table)
def print_as_plain_table(self, data: list[dict]):
"""Renders list of dictionaries as a simple table than can be easily piped."""
if not data:
self.print("No data found")
return
rows = [d.values() for d in data]
output = tabulate(rows, tablefmt="plain", headers=list(data[0].keys()))
print(output)
def _normalize_data(self, value: Any, output: str) -> list | str | dict | None:
if isinstance(value, (tuple, list)):
if output == "table":
return ",".join(str(self._normalize_data(x, output)) for x in value)
return [self._normalize_data(x, output) for x in value]
if isinstance(value, dict) and output != "table":
return {k: self._normalize_data(v, output) for k, v in value.items()}
if inspect.isclass(value) and not isinstance(value, PluginsDirectorySource):
return value.__name__
if value is None:
return None
return str(value)
def print_as(
self,
data: Sequence[dict | Any],
output: str,
mapper: Callable[[Any], dict] | None = None,
) -> None:
"""Prints provided using format specified by output argument."""
output_to_renderer: dict[str, Callable[[Any], None]] = {
"json": self.print_as_json,
"yaml": self.print_as_yaml,
"table": self.print_as_table,
"plain": self.print_as_plain_table,
}
renderer = output_to_renderer.get(output)
if not renderer:
raise ValueError(
f"Unknown formatter: {output}. Allowed options: {list(output_to_renderer.keys())}"
)
if mapper:
dict_data: Sequence[dict] = [mapper(d) for d in data]
elif is_data_sequence(data):
dict_data = data
else:
raise ValueError("To tabulate non-dictionary data you need to provide `mapper` function")
dict_data = [{k: self._normalize_data(v, output) for k, v in d.items()} for d in dict_data]
renderer(dict_data)
class SimpleTable(Table):
"""A rich Table with some default hardcoded for consistency."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.show_edge = kwargs.get("show_edge", False)
self.pad_edge = kwargs.get("pad_edge", False)
self.box = kwargs.get("box", ASCII_DOUBLE_HEAD)
self.show_header = kwargs.get("show_header", False)
self.title_style = kwargs.get("title_style", "bold green")
self.title_justify = kwargs.get("title_justify", "left")
self.caption = kwargs.get("caption", " ")
def add_column(self, *args, **kwargs) -> None:
"""Add a column to the table. We use different default."""
kwargs["overflow"] = kwargs.get("overflow") # to avoid truncating
super().add_column(*args, **kwargs)
| 5,539 | 38.014085 | 101 | py |
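A minimal sketch of how AirflowConsole.print_as is typically driven by CLI commands (not part of the source file); the records and mapper below are illustrative only.

from airflow.cli.simple_table import AirflowConsole

records = [
    {"dag_id": "example_dag", "is_paused": False, "tags": ["demo", "etl"]},
    {"dag_id": "other_dag", "is_paused": True, "tags": []},
]

console = AirflowConsole()
console.print_as(data=records, output="table")  # rich SimpleTable; list values are comma-joined
console.print_as(data=records, output="json")   # syntax-highlighted JSON
console.print_as(data=records, output="plain")  # tabulate output that is easy to pipe

# Non-dict items need a mapper that turns each element into a dict first
console.print_as(data=["a", "bb"], output="yaml", mapper=lambda s: {"value": s, "length": len(s)})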