repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
airflow | airflow-main/airflow/providers/plexus/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/plexus/operators/job.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import time
from typing import Any
import requests
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.plexus.hooks.plexus import PlexusHook
logger = logging.getLogger(__name__)
class PlexusJobOperator(BaseOperator):
"""
Submits a Plexus job.
:param job_params: parameters required to launch a job.
Required job parameters are the following:
- "name": job name created by the user.
- "app": name of the application to run. Found in the Plexus UI.
- "queue": public cluster name. Found in the Plexus UI.
- "num_nodes": number of nodes.
- "num_cores": number of cores per node.
"""
def __init__(self, job_params: dict, **kwargs) -> None:
super().__init__(**kwargs)
self.job_params = job_params
self.required_params = {"name", "app", "queue", "num_cores", "num_nodes"}
self.lookups = {
"app": ("apps/", "id", "name"),
"billing_account_id": ("users/{}/billingaccounts/", "id", None),
"queue": ("queues/", "id", "public_name"),
}
self.job_params.update({"billing_account_id": None})
self.is_service = None
def execute(self, context: Any) -> Any:
hook = PlexusHook()
params = self.construct_job_params(hook)
if self.is_service is True:
if self.job_params.get("expected_runtime") is None:
end_state = "Running"
else:
end_state = "Finished"
elif self.is_service is False:
end_state = "Completed"
else:
raise AirflowException(
"Unable to determine if application "
"is running as a batch job or service. "
"Contact Core Scientific AI Team."
)
logger.info("creating job w/ following params: %s", params)
jobs_endpoint = hook.host + "jobs/"
headers = {"Authorization": f"Bearer {hook.token}"}
create_job = requests.post(jobs_endpoint, headers=headers, data=params, timeout=5)
if create_job.ok:
job = create_job.json()
jid = job["id"]
state = job["last_state"]
while state != end_state:
time.sleep(3)
jid_endpoint = jobs_endpoint + f"{jid}/"
get_job = requests.get(jid_endpoint, headers=headers, timeout=5)
if not get_job.ok:
raise AirflowException(
"Could not retrieve job status. "
f"Status Code: [{get_job.status_code}]. Reason: {get_job.reason} - {get_job.text}"
)
new_state = get_job.json()["last_state"]
if new_state in ("Cancelled", "Failed"):
raise AirflowException(f"Job {new_state}")
elif new_state != state:
logger.info("job is %s", new_state)
state = new_state
else:
raise AirflowException(
"Could not start job. "
f"Status Code: [{create_job.status_code}]. Reason: {create_job.reason} - {create_job.text}"
)
def _api_lookup(self, param: str, hook):
lookup = self.lookups[param]
key = lookup[1]
mapping = None if lookup[2] is None else (lookup[2], self.job_params[param])
if param == "billing_account_id":
endpoint = hook.host + lookup[0].format(hook.user_id)
else:
endpoint = hook.host + lookup[0]
headers = {"Authorization": f"Bearer {hook.token}"}
response = requests.get(endpoint, headers=headers, timeout=5)
results = response.json()["results"]
v = None
if mapping is None:
v = results[0][key]
else:
for dct in results:
if dct[mapping[0]] == mapping[1]:
v = dct[key]
if param == "app":
self.is_service = dct["is_service"]
if v is None:
raise AirflowException(f"Could not locate value for param:{key} at endpoint: {endpoint}")
return v
def construct_job_params(self, hook: Any) -> dict[Any, Any | None]:
"""
Creates job_params dict for api call to launch a Plexus job.
Some parameters required to launch a job
are not available to the user in the Plexus
UI. For example, an app id is required, but
only the app name is provided in the UI.
This function acts as a backend lookup
of the required param value using the
user-provided value.
:param hook: plexus hook object
"""
missing_params = self.required_params - set(self.job_params)
if len(missing_params) > 0:
raise AirflowException(f"Missing the following required job_params: {', '.join(missing_params)}")
params = {}
for prm in self.job_params:
if prm in self.lookups:
v = self._api_lookup(param=prm, hook=hook)
params[prm] = v
else:
params[prm] = self.job_params[prm]
return params
| 6,047 | 37.522293 | 109 | py |
airflow | airflow-main/airflow/providers/plexus/hooks/plexus.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
import arrow
import jwt
import requests
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Variable
class PlexusHook(BaseHook):
"""
Used for JWT token generation and storage to make Plexus API calls.
Requires that ``email`` and ``password`` Airflow variables be created.
Example:
- export AIRFLOW_VAR_EMAIL = [email protected]
- export AIRFLOW_VAR_PASSWORD = *******
"""
def __init__(self) -> None:
super().__init__()
self.__token = None
self.__token_exp = None
self.host = "https://apiplexus.corescientific.com/"
self.user_id = None
def _generate_token(self) -> Any:
login = Variable.get("email")
pwd = Variable.get("password")
if login is None or pwd is None:
raise AirflowException("No valid email/password supplied.")
token_endpoint = self.host + "sso/jwt-token/"
response = requests.post(token_endpoint, data={"email": login, "password": pwd}, timeout=5)
if not response.ok:
raise AirflowException(
"Could not retrieve JWT Token. "
f"Status Code: [{response.status_code}]. Reason: {response.reason} - {response.text}"
)
token = response.json()["access"]
payload = jwt.decode(token, verify=False)
self.user_id = payload["user_id"]
self.__token_exp = payload["exp"]
return token
@property
def token(self) -> Any:
"""Returns users token."""
if self.__token is not None:
if not self.__token_exp or arrow.get(self.__token_exp) <= arrow.now():
self.__token = self._generate_token()
return self.__token
else:
self.__token = self._generate_token()
return self.__token
| 2,713 | 33.794872 | 101 | py |
airflow | airflow-main/airflow/providers/plexus/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/odbc/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.0.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-odbc:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/odbc/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/odbc/hooks/odbc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains ODBC hook."""
from __future__ import annotations
from typing import Any
from urllib.parse import quote_plus
import pyodbc
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.utils.helpers import merge_dicts
class OdbcHook(DbApiHook):
"""
Interact with ODBC data sources using pyodbc.
To configure the driver, in addition to supplying it as a constructor arg, the following are also supported:
* set the ``driver`` parameter in the ``hook_params`` dictionary when instantiating the hook via SQL operators.
* set a ``driver`` extra in the connection and set ``allow_driver_in_extra`` to True in
the ``providers.odbc`` section of the Airflow config.
* patch ``OdbcHook.default_driver`` in your ``local_settings.py`` file.
See :doc:`/connections/odbc` for full documentation.
:param args: passed to DbApiHook
:param database: database to use -- overrides connection ``schema``
:param driver: name of the driver or path to the driver. See above for more info.
:param dsn: name of the DSN to use. Overrides the DSN supplied in the connection ``extra``.
:param connect_kwargs: keyword arguments passed to ``pyodbc.connect``
:param sqlalchemy_scheme: scheme for the SQLAlchemy connection. Default is ``mssql+pyodbc``. Only used for
the ``get_sqlalchemy_engine`` and ``get_sqlalchemy_connection`` methods.
:param kwargs: passed to DbApiHook
"""
DEFAULT_SQLALCHEMY_SCHEME = "mssql+pyodbc"
conn_name_attr = "odbc_conn_id"
default_conn_name = "odbc_default"
conn_type = "odbc"
hook_name = "ODBC"
supports_autocommit = True
default_driver: str | None = None
def __init__(
self,
*args,
database: str | None = None,
driver: str | None = None,
dsn: str | None = None,
connect_kwargs: dict | None = None,
sqlalchemy_scheme: str | None = None,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self._database = database
self._driver = driver
self._dsn = dsn
self._conn_str = None
self._sqlalchemy_scheme = sqlalchemy_scheme
self._connection = None
self._connect_kwargs = connect_kwargs
@property
def connection(self):
"""The Connection object with ID ``odbc_conn_id``."""
if not self._connection:
self._connection = self.get_connection(getattr(self, self.conn_name_attr))
return self._connection
@property
def database(self) -> str | None:
"""Database provided in init if exists; otherwise, ``schema`` from ``Connection`` object."""
return self._database or self.connection.schema
@property
def sqlalchemy_scheme(self) -> str:
"""SQLAlchemy scheme either from constructor, connection extras or default."""
extra_scheme = self.connection_extra_lower.get("sqlalchemy_scheme")
if not self._sqlalchemy_scheme and extra_scheme and (":" in extra_scheme or "/" in extra_scheme):
raise RuntimeError("sqlalchemy_scheme in connection extra should not contain : or / characters")
return self._sqlalchemy_scheme or extra_scheme or self.DEFAULT_SQLALCHEMY_SCHEME
@property
def connection_extra_lower(self) -> dict:
"""
``connection.extra_dejson`` but where keys are converted to lower case.
This is used internally for case-insensitive access of odbc params.
"""
return {k.lower(): v for k, v in self.connection.extra_dejson.items()}
@property
def driver(self) -> str | None:
"""Driver from init param if given; else try to find one in connection extra."""
extra_driver = self.connection_extra_lower.get("driver")
from airflow.configuration import conf
if extra_driver and conf.getboolean("providers.odbc", "allow_driver_in_extra", fallback=False):
self._driver = extra_driver
elif extra_driver:
self.log.warning(
"You have supplied 'driver' via connection extra but it will not be used. In order to "
"use 'driver' from extra you must set airflow config setting `allow_driver_in_extra = True` "
"in section `providers.odbc`. Alternatively you may specify driver via 'driver' parameter of "
"the hook constructor or via 'hook_params' dictionary with key 'driver' if using SQL "
"operators."
)
if not self._driver:
self._driver = self.default_driver
return self._driver.strip().lstrip("{").rstrip("}").strip() if self._driver else None
@property
def dsn(self) -> str | None:
"""DSN from init param if given; else try to find one in connection extra."""
if not self._dsn:
dsn = self.connection_extra_lower.get("dsn")
if dsn:
self._dsn = dsn.strip()
return self._dsn
@property
def odbc_connection_string(self):
"""ODBC connection string.
We build connection string instead of using ``pyodbc.connect`` params
because, for example, there is no param representing
``ApplicationIntent=ReadOnly``. Any key-value pairs provided in
``Connection.extra`` will be added to the connection string.
"""
if not self._conn_str:
conn_str = ""
if self.driver:
conn_str += f"DRIVER={{{self.driver}}};"
if self.dsn:
conn_str += f"DSN={self.dsn};"
if self.connection.host:
conn_str += f"SERVER={self.connection.host};"
database = self.database or self.connection.schema
if database:
conn_str += f"DATABASE={database};"
if self.connection.login:
conn_str += f"UID={self.connection.login};"
if self.connection.password:
conn_str += f"PWD={self.connection.password};"
if self.connection.port:
conn_str += f"PORT={self.connection.port};"
extra_exclude = {"driver", "dsn", "connect_kwargs", "sqlalchemy_scheme"}
extra_params = {
k: v for k, v in self.connection.extra_dejson.items() if k.lower() not in extra_exclude
}
for k, v in extra_params.items():
conn_str += f"{k}={v};"
self._conn_str = conn_str
return self._conn_str
@property
def connect_kwargs(self) -> dict:
"""Effective kwargs to be passed to ``pyodbc.connect``.
The kwargs are merged from the ``connect_kwargs`` entry in the connection extra and the
``connect_kwargs`` argument of the hook's constructor. Values passed to the hook take
precedence over those from the connection.
If ``attrs_before`` is provided, keys and values are converted to int,
as required by pyodbc.
"""
conn_connect_kwargs = self.connection_extra_lower.get("connect_kwargs", {})
hook_connect_kwargs = self._connect_kwargs or {}
merged_connect_kwargs = merge_dicts(conn_connect_kwargs, hook_connect_kwargs)
if "attrs_before" in merged_connect_kwargs:
merged_connect_kwargs["attrs_before"] = {
int(k): int(v) for k, v in merged_connect_kwargs["attrs_before"].items()
}
return merged_connect_kwargs
def get_conn(self) -> pyodbc.Connection:
"""Returns a pyodbc connection object."""
conn = pyodbc.connect(self.odbc_connection_string, **self.connect_kwargs)
return conn
def get_uri(self) -> str:
"""URI invoked in :meth:`~airflow.providers.common.sql.hooks.sql.DbApiHook.get_sqlalchemy_engine`."""
quoted_conn_str = quote_plus(self.odbc_connection_string)
uri = f"{self.sqlalchemy_scheme}:///?odbc_connect={quoted_conn_str}"
return uri
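# With the default scheme this produces a URI of the form (hypothetical example):
# "mssql+pyodbc:///?odbc_connect=DRIVER%3D%7B...%7D%3BSERVER%3Dmyhost%3B..."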
def get_sqlalchemy_connection(
self, connect_kwargs: dict | None = None, engine_kwargs: dict | None = None
) -> Any:
"""SQLAlchemy connection object."""
engine = self.get_sqlalchemy_engine(engine_kwargs=engine_kwargs)
cnx = engine.connect(**(connect_kwargs or {}))
return cnx
| 8,946 | 40.808411 | 110 | py |
airflow | airflow-main/airflow/providers/alibaba/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "2.5.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-alibaba:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/operators/oss.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Alibaba Cloud OSS operators."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.alibaba.cloud.hooks.oss import OSSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class OSSCreateBucketOperator(BaseOperator):
"""
This operator creates an OSS bucket.
:param region: OSS region in which to create the bucket
:param bucket_name: name of the bucket to create
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.create_bucket(bucket_name=self.bucket_name)
class OSSDeleteBucketOperator(BaseOperator):
"""
This operator deletes an OSS bucket.
:param region: OSS region of the bucket
:param bucket_name: name of the bucket to delete
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.delete_bucket(bucket_name=self.bucket_name)
class OSSUploadObjectOperator(BaseOperator):
"""
This operator uploads a local file to OSS.
:param key: the OSS path of the object
:param file: local file to upload.
:param region: OSS region of the bucket
:param bucket_name: name of the bucket to upload to
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
key: str,
file: str,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.key = key
self.file = file
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.upload_local_file(bucket_name=self.bucket_name, key=self.key, file=self.file)
class OSSDownloadObjectOperator(BaseOperator):
"""
This operator downloads an OSS object.
:param key: key of the object to download.
:param file: local path + file name to save the object to.
:param region: OSS region
:param bucket_name: OSS bucket name
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
key: str,
file: str,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.key = key
self.file = file
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.download_file(bucket_name=self.bucket_name, key=self.key, local_file=self.file)
class OSSDeleteBatchObjectOperator(BaseOperator):
"""
This operator deletes a batch of OSS objects.
:param keys: list of keys of the objects to delete.
:param region: OSS region
:param bucket_name: OSS bucket name
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
keys: list,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.keys = keys
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.delete_objects(bucket_name=self.bucket_name, key=self.keys)
class OSSDeleteObjectOperator(BaseOperator):
"""
This operator deletes an OSS object.
:param key: key of the object to delete.
:param region: OSS region
:param bucket_name: OSS bucket name
:param oss_conn_id: The Airflow connection used for OSS credentials.
"""
def __init__(
self,
key: str,
region: str,
bucket_name: str | None = None,
oss_conn_id: str = "oss_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.key = key
self.oss_conn_id = oss_conn_id
self.region = region
self.bucket_name = bucket_name
def execute(self, context: Context):
oss_hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
oss_hook.delete_object(bucket_name=self.bucket_name, key=self.key)
| 6,320 | 30.292079 | 96 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/operators/analyticdb_spark.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from time import sleep
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.alibaba.cloud.hooks.analyticdb_spark import AnalyticDBSparkHook, AppState
if TYPE_CHECKING:
from airflow.utils.context import Context
class AnalyticDBSparkBaseOperator(BaseOperator):
"""Abstract base class that defines how users develop AnalyticDB Spark."""
def __init__(
self,
*,
adb_spark_conn_id: str = "adb_spark_default",
region: str | None = None,
polling_interval: int = 0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.app_id: str | None = None
self.polling_interval = polling_interval
self._adb_spark_conn_id = adb_spark_conn_id
self._region = region
self._adb_spark_hook: AnalyticDBSparkHook | None = None
@cached_property
def get_hook(self) -> AnalyticDBSparkHook:
"""Get valid hook."""
if self._adb_spark_hook is None or not isinstance(self._adb_spark_hook, AnalyticDBSparkHook):
self._adb_spark_hook = AnalyticDBSparkHook(
adb_spark_conn_id=self._adb_spark_conn_id, region=self._region
)
return self._adb_spark_hook
def execute(self, context: Context) -> Any:
...
def monitor_application(self):
self.log.info("Monitoring application with %s", self.app_id)
if self.polling_interval > 0:
self.poll_for_termination(self.app_id)
def poll_for_termination(self, app_id: str) -> None:
"""
Poll for spark application termination.
:param app_id: id of the spark application to monitor
"""
hook = self.get_hook
state = hook.get_spark_state(app_id)
while AppState(state) not in AnalyticDBSparkHook.TERMINAL_STATES:
self.log.debug("Application with id %s is in state: %s", app_id, state)
sleep(self.polling_interval)
state = hook.get_spark_state(app_id)
self.log.info("Application with id %s terminated with state: %s", app_id, state)
self.log.info(
"Web ui address is %s for application with id %s", hook.get_spark_web_ui_address(app_id), app_id
)
self.log.info(hook.get_spark_log(app_id))
if AppState(state) != AppState.COMPLETED:
raise AirflowException(f"Application {app_id} did not succeed")
def on_kill(self) -> None:
self.kill()
def kill(self) -> None:
"""Delete the specified application."""
if self.app_id is not None:
self.get_hook.kill_spark_app(self.app_id)
class AnalyticDBSparkSQLOperator(AnalyticDBSparkBaseOperator):
"""
Submits a Spark SQL application to the underlying cluster; wraps the AnalyticDB Spark REST API.
:param sql: The SQL query to execute.
:param conf: Spark configuration properties.
:param driver_resource_spec: The resource specifications of the Spark driver.
:param executor_resource_spec: The resource specifications of each Spark executor.
:param num_executors: number of executors to launch for this application.
:param name: name of this application.
:param cluster_id: The cluster ID of AnalyticDB MySQL 3.0 Data Lakehouse.
:param rg_name: The name of resource group in AnalyticDB MySQL 3.0 Data Lakehouse cluster.
"""
template_fields: Sequence[str] = ("spark_params",)
template_fields_renderers = {"spark_params": "json"}
def __init__(
self,
*,
sql: str,
conf: dict[Any, Any] | None = None,
driver_resource_spec: str | None = None,
executor_resource_spec: str | None = None,
num_executors: int | str | None = None,
name: str | None = None,
cluster_id: str,
rg_name: str,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.spark_params = {
"sql": sql,
"conf": conf,
"driver_resource_spec": driver_resource_spec,
"executor_resource_spec": executor_resource_spec,
"num_executors": num_executors,
"name": name,
}
self._cluster_id = cluster_id
self._rg_name = rg_name
def execute(self, context: Context) -> Any:
submit_response = self.get_hook.submit_spark_sql(
cluster_id=self._cluster_id, rg_name=self._rg_name, **self.spark_params
)
self.app_id = submit_response.body.data.app_id
self.monitor_application()
return self.app_id
class AnalyticDBSparkBatchOperator(AnalyticDBSparkBaseOperator):
"""
Submits a Spark batch application to the underlying cluster; wraps the AnalyticDB Spark REST API.
:param file: path of the file containing the application to execute.
:param class_name: name of the application Java/Spark main class.
:param args: application command line arguments.
:param conf: Spark configuration properties.
:param jars: jars to be used in this application.
:param py_files: python files to be used in this application.
:param files: files to be used in this application.
:param driver_resource_spec: The resource specifications of the Spark driver.
:param executor_resource_spec: The resource specifications of each Spark executor.
:param num_executors: number of executors to launch for this application.
:param archives: archives to be used in this application.
:param name: name of this application.
:param cluster_id: The cluster ID of AnalyticDB MySQL 3.0 Data Lakehouse.
:param rg_name: The name of resource group in AnalyticDB MySQL 3.0 Data Lakehouse cluster.
"""
template_fields: Sequence[str] = ("spark_params",)
template_fields_renderers = {"spark_params": "json"}
def __init__(
self,
*,
file: str,
class_name: str | None = None,
args: Sequence[str | int | float] | None = None,
conf: dict[Any, Any] | None = None,
jars: Sequence[str] | None = None,
py_files: Sequence[str] | None = None,
files: Sequence[str] | None = None,
driver_resource_spec: str | None = None,
executor_resource_spec: str | None = None,
num_executors: int | str | None = None,
archives: Sequence[str] | None = None,
name: str | None = None,
cluster_id: str,
rg_name: str,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.spark_params = {
"file": file,
"class_name": class_name,
"args": args,
"conf": conf,
"jars": jars,
"py_files": py_files,
"files": files,
"driver_resource_spec": driver_resource_spec,
"executor_resource_spec": executor_resource_spec,
"num_executors": num_executors,
"archives": archives,
"name": name,
}
self._cluster_id = cluster_id
self._rg_name = rg_name
def execute(self, context: Context) -> Any:
submit_response = self.get_hook.submit_spark_app(
cluster_id=self._cluster_id, rg_name=self._rg_name, **self.spark_params
)
self.app_id = submit_response.body.data.app_id
self.monitor_application()
return self.app_id
| 8,288 | 36.337838 | 108 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/hooks/oss.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import wraps
from inspect import signature
from typing import TYPE_CHECKING, Callable, TypeVar, cast
from urllib.parse import urlsplit
import oss2
from oss2.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
if TYPE_CHECKING:
from airflow.models.connection import Connection
T = TypeVar("T", bound=Callable)
def provide_bucket_name(func: T) -> T:
"""Function decorator that unifies bucket name and key is a key is provided but not a bucket name."""
function_signature = signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> T:
bound_args = function_signature.bind(*args, **kwargs)
self = args[0]
if bound_args.arguments.get("bucket_name") is None and self.oss_conn_id:
connection = self.get_connection(self.oss_conn_id)
if connection.schema:
bound_args.arguments["bucket_name"] = connection.schema
return func(*bound_args.args, **bound_args.kwargs)
return cast(T, wrapper)
def unify_bucket_name_and_key(func: T) -> T:
"""Function decorator that unifies bucket name and key is a key is provided but not a bucket name."""
function_signature = signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> T:
bound_args = function_signature.bind(*args, **kwargs)
def get_key() -> str:
if "key" in bound_args.arguments:
return "key"
raise ValueError("Missing key parameter!")
key_name = get_key()
if "bucket_name" not in bound_args.arguments or bound_args.arguments["bucket_name"] is None:
bound_args.arguments["bucket_name"], bound_args.arguments["key"] = OSSHook.parse_oss_url(
bound_args.arguments[key_name]
)
return func(*bound_args.args, **bound_args.kwargs)
return cast(T, wrapper)
class OSSHook(BaseHook):
"""Interact with Alibaba Cloud OSS, using the oss2 library."""
conn_name_attr = "alibabacloud_conn_id"
default_conn_name = "oss_default"
conn_type = "oss"
hook_name = "OSS"
def __init__(self, region: str | None = None, oss_conn_id="oss_default", *args, **kwargs) -> None:
self.oss_conn_id = oss_conn_id
self.oss_conn = self.get_connection(oss_conn_id)
self.region = self.get_default_region() if region is None else region
super().__init__(*args, **kwargs)
def get_conn(self) -> Connection:
"""Returns connection for the hook."""
return self.oss_conn
@staticmethod
def parse_oss_url(ossurl: str) -> tuple:
"""
Parses the OSS URL into a bucket name and key.
:param ossurl: The OSS URL to parse.
:return: the parsed bucket name and key
"""
parsed_url = urlsplit(ossurl)
if not parsed_url.netloc:
raise AirflowException(f'Please provide a bucket_name instead of "{ossurl}"')
bucket_name = parsed_url.netloc
key = parsed_url.path.lstrip("/")
return bucket_name, key
@provide_bucket_name
@unify_bucket_name_and_key
def object_exists(self, key: str, bucket_name: str | None = None) -> bool:
"""
Check if object exists.
:param key: the path of the object
:param bucket_name: the name of the bucket
:return: True if it exists and False if not.
"""
try:
return self.get_bucket(bucket_name).object_exists(key)
except ClientError as e:
self.log.error(e.message)
return False
@provide_bucket_name
def get_bucket(self, bucket_name: str | None = None) -> oss2.api.Bucket:
"""
Returns an oss2.Bucket object.
:param bucket_name: the name of the bucket
:return: the bucket object to the bucket name.
"""
auth = self.get_credential()
assert self.region is not None
return oss2.Bucket(auth, f"https://oss-{self.region}.aliyuncs.com", bucket_name)
@provide_bucket_name
@unify_bucket_name_and_key
def load_string(self, key: str, content: str, bucket_name: str | None = None) -> None:
"""
Loads a string to OSS.
:param key: the path of the object
:param content: str to set as content for the key.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).put_object(key, content)
except Exception as e:
raise AirflowException(f"Errors: {e}")
@provide_bucket_name
@unify_bucket_name_and_key
def upload_local_file(
self,
key: str,
file: str,
bucket_name: str | None = None,
) -> None:
"""
Upload a local file to OSS.
:param key: the OSS path of the object
:param file: local file to upload.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).put_object_from_file(key, file)
except Exception as e:
raise AirflowException(f"Errors when upload file: {e}")
@provide_bucket_name
@unify_bucket_name_and_key
def download_file(
self,
key: str,
local_file: str,
bucket_name: str | None = None,
) -> str | None:
"""
Download file from OSS.
:param key: key of the file-like object to download.
:param local_file: local path + file name to save.
:param bucket_name: the name of the bucket
:return: the file name.
"""
try:
self.get_bucket(bucket_name).get_object_to_file(key, local_file)
except Exception as e:
self.log.error(e)
return None
return local_file
@provide_bucket_name
@unify_bucket_name_and_key
def delete_object(
self,
key: str,
bucket_name: str | None = None,
) -> None:
"""
Delete object from OSS.
:param key: key of the object to delete.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).delete_object(key)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when deleting: {key}")
@provide_bucket_name
@unify_bucket_name_and_key
def delete_objects(
self,
key: list,
bucket_name: str | None = None,
) -> None:
"""
Delete objects from OSS.
:param key: list of keys of the objects to delete.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).batch_delete_objects(key)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when deleting: {key}")
@provide_bucket_name
def delete_bucket(
self,
bucket_name: str | None = None,
) -> None:
"""
Delete bucket from OSS.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).delete_bucket()
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when deleting: {bucket_name}")
@provide_bucket_name
def create_bucket(
self,
bucket_name: str | None = None,
) -> None:
"""
Create bucket.
:param bucket_name: the name of the bucket
"""
try:
self.get_bucket(bucket_name).create_bucket()
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when create bucket: {bucket_name}")
@provide_bucket_name
@unify_bucket_name_and_key
def append_string(self, bucket_name: str | None, content: str, key: str, pos: int) -> None:
"""
Append a string to an existing remote file.
:param bucket_name: the name of the bucket
:param content: content to be appended
:param key: oss bucket key
:param pos: position of the existing file where the content will be appended
"""
self.log.info("Write oss bucket. key: %s, pos: %s", key, pos)
try:
self.get_bucket(bucket_name).append_object(key, pos, content)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when append string for object: {key}")
@provide_bucket_name
@unify_bucket_name_and_key
def read_key(self, bucket_name: str | None, key: str) -> str:
"""
Read oss remote object content with the specified key.
:param bucket_name: the name of the bucket
:param key: oss bucket key
"""
self.log.info("Read oss key: %s", key)
try:
return self.get_bucket(bucket_name).get_object(key).read().decode("utf-8")
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when read bucket object: {key}")
@provide_bucket_name
@unify_bucket_name_and_key
def head_key(self, bucket_name: str | None, key: str) -> oss2.models.HeadObjectResult:
"""
Get meta info of the specified remote object.
:param bucket_name: the name of the bucket
:param key: oss bucket key
"""
self.log.info("Head Object oss key: %s", key)
try:
return self.get_bucket(bucket_name).head_object(key)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when head bucket object: {key}")
@provide_bucket_name
@unify_bucket_name_and_key
def key_exist(self, bucket_name: str | None, key: str) -> bool:
"""
Find out whether the specified key exists in the oss remote storage.
:param bucket_name: the name of the bucket
:param key: oss bucket key
"""
# full_path = None
self.log.info("Looking up oss bucket %s for bucket key %s ...", bucket_name, key)
try:
return self.get_bucket(bucket_name).object_exists(key)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when check bucket object existence: {key}")
def get_credential(self) -> oss2.auth.Auth:
extra_config = self.oss_conn.extra_dejson
auth_type = extra_config.get("auth_type", None)
if not auth_type:
raise Exception("No auth_type specified in extra_config. ")
if auth_type != "AK":
raise Exception(f"Unsupported auth_type: {auth_type}")
oss_access_key_id = extra_config.get("access_key_id", None)
oss_access_key_secret = extra_config.get("access_key_secret", None)
if not oss_access_key_id:
raise Exception(f"No access_key_id is specified for connection: {self.oss_conn_id}")
if not oss_access_key_secret:
raise Exception(f"No access_key_secret is specified for connection: {self.oss_conn_id}")
return oss2.Auth(oss_access_key_id, oss_access_key_secret)
def get_default_region(self) -> str | None:
extra_config = self.oss_conn.extra_dejson
auth_type = extra_config.get("auth_type", None)
if not auth_type:
raise Exception("No auth_type specified in extra_config. ")
if auth_type != "AK":
raise Exception(f"Unsupported auth_type: {auth_type}")
default_region = extra_config.get("region", None)
if not default_region:
raise Exception(f"No region is specified for connection: {self.oss_conn_id}")
return default_region
| 12,520 | 32.932249 | 106 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/hooks/analyticdb_spark.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from enum import Enum
from typing import Any, Sequence
from alibabacloud_adb20211201.client import Client
from alibabacloud_adb20211201.models import (
GetSparkAppLogRequest,
GetSparkAppStateRequest,
GetSparkAppWebUiAddressRequest,
KillSparkAppRequest,
SubmitSparkAppRequest,
SubmitSparkAppResponse,
)
from alibabacloud_tea_openapi.models import Config
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class AppState(Enum):
"""
AnalyticDB Spark application states.
See:
https://www.alibabacloud.com/help/en/analyticdb-for-mysql/latest/api-doc-adb-2021-12-01-api-struct-sparkappinfo.
"""
SUBMITTED = "SUBMITTED"
STARTING = "STARTING"
RUNNING = "RUNNING"
FAILING = "FAILING"
FAILED = "FAILED"
KILLING = "KILLING"
KILLED = "KILLED"
SUCCEEDING = "SUCCEEDING"
COMPLETED = "COMPLETED"
FATAL = "FATAL"
UNKNOWN = "UNKNOWN"
class AnalyticDBSparkHook(BaseHook, LoggingMixin):
"""
Hook for AnalyticDB MySQL Spark through the REST API.
:param adb_spark_conn_id: The Airflow connection used for AnalyticDB MySQL Spark credentials.
:param region: AnalyticDB MySQL region you want to submit spark application.
"""
TERMINAL_STATES = {AppState.COMPLETED, AppState.FAILED, AppState.FATAL, AppState.KILLED}
conn_name_attr = "alibabacloud_conn_id"
default_conn_name = "adb_spark_default"
conn_type = "adb_spark"
hook_name = "AnalyticDB Spark"
def __init__(
self, adb_spark_conn_id: str = "adb_spark_default", region: str | None = None, *args, **kwargs
) -> None:
self.adb_spark_conn_id = adb_spark_conn_id
self.adb_spark_conn = self.get_connection(adb_spark_conn_id)
self.region = self.get_default_region() if region is None else region
super().__init__(*args, **kwargs)
def submit_spark_app(
self, cluster_id: str, rg_name: str, *args: Any, **kwargs: Any
) -> SubmitSparkAppResponse:
"""
Perform request to submit spark application.
:param cluster_id: The cluster ID of AnalyticDB MySQL 3.0 Data Lakehouse.
:param rg_name: The name of resource group in AnalyticDB MySQL 3.0 Data Lakehouse cluster.
"""
self.log.info("Submitting application")
request = SubmitSparkAppRequest(
dbcluster_id=cluster_id,
resource_group_name=rg_name,
data=json.dumps(self.build_submit_app_data(*args, **kwargs)),
app_type="BATCH",
)
try:
return self.get_adb_spark_client().submit_spark_app(request)
except Exception as e:
self.log.error(e)
raise AirflowException("Errors when submit spark application") from e
def submit_spark_sql(
self, cluster_id: str, rg_name: str, *args: Any, **kwargs: Any
) -> SubmitSparkAppResponse:
"""
Perform request to submit spark sql.
:param cluster_id: The cluster ID of AnalyticDB MySQL 3.0 Data Lakehouse.
:param rg_name: The name of resource group in AnalyticDB MySQL 3.0 Data Lakehouse cluster.
"""
self.log.info("Submitting Spark SQL")
request = SubmitSparkAppRequest(
dbcluster_id=cluster_id,
resource_group_name=rg_name,
data=self.build_submit_sql_data(*args, **kwargs),
app_type="SQL",
)
try:
return self.get_adb_spark_client().submit_spark_app(request)
except Exception as e:
self.log.error(e)
raise AirflowException("Errors when submit spark sql") from e
def get_spark_state(self, app_id: str) -> str:
"""
Fetch the state of the specified spark application.
:param app_id: identifier of the spark application
"""
self.log.debug("Fetching state for spark application %s", app_id)
try:
return (
self.get_adb_spark_client()
.get_spark_app_state(GetSparkAppStateRequest(app_id=app_id))
.body.data.state
)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when fetching state for spark application: {app_id}") from e
def get_spark_web_ui_address(self, app_id: str) -> str:
"""
Fetch the web ui address of the specified spark application.
:param app_id: identifier of the spark application
"""
self.log.debug("Fetching web ui address for spark application %s", app_id)
try:
return (
self.get_adb_spark_client()
.get_spark_app_web_ui_address(GetSparkAppWebUiAddressRequest(app_id=app_id))
.body.data.web_ui_address
)
except Exception as e:
self.log.error(e)
raise AirflowException(
f"Errors when fetching web ui address for spark application: {app_id}"
) from e
def get_spark_log(self, app_id: str) -> str:
"""
Get the logs for a specified spark application.
:param app_id: identifier of the spark application
"""
self.log.debug("Fetching log for spark application %s", app_id)
try:
return (
self.get_adb_spark_client()
.get_spark_app_log(GetSparkAppLogRequest(app_id=app_id))
.body.data.log_content
)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when fetching log for spark application: {app_id}") from e
def kill_spark_app(self, app_id: str) -> None:
"""
Kill the specified spark application.
:param app_id: identifier of the spark application
"""
self.log.info("Killing spark application %s", app_id)
try:
self.get_adb_spark_client().kill_spark_app(KillSparkAppRequest(app_id=app_id))
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when killing spark application: {app_id}") from e
@staticmethod
def build_submit_app_data(
file: str | None = None,
class_name: str | None = None,
args: Sequence[str | int | float] | None = None,
conf: dict[Any, Any] | None = None,
jars: Sequence[str] | None = None,
py_files: Sequence[str] | None = None,
files: Sequence[str] | None = None,
driver_resource_spec: str | None = None,
executor_resource_spec: str | None = None,
num_executors: int | str | None = None,
archives: Sequence[str] | None = None,
name: str | None = None,
) -> dict:
"""
Build the submit application request data.
:param file: path of the file containing the application to execute.
:param class_name: name of the application Java/Spark main class.
:param args: application command line arguments.
:param conf: Spark configuration properties.
:param jars: jars to be used in this application.
:param py_files: python files to be used in this application.
:param files: files to be used in this application.
:param driver_resource_spec: The resource specifications of the Spark driver.
:param executor_resource_spec: The resource specifications of each Spark executor.
:param num_executors: number of executors to launch for this application.
:param archives: archives to be used in this application.
:param name: name of this application.
"""
if file is None:
raise ValueError("Parameter file is need when submit spark application.")
data: dict[str, Any] = {"file": file}
extra_conf: dict[str, str] = {}
if class_name:
data["className"] = class_name
if args and AnalyticDBSparkHook._validate_list_of_stringables(args):
data["args"] = [str(val) for val in args]
if driver_resource_spec:
extra_conf["spark.driver.resourceSpec"] = driver_resource_spec
if executor_resource_spec:
extra_conf["spark.executor.resourceSpec"] = executor_resource_spec
if num_executors:
extra_conf["spark.executor.instances"] = str(num_executors)
data["conf"] = extra_conf.copy()
if conf and AnalyticDBSparkHook._validate_extra_conf(conf):
data["conf"].update(conf)
if jars and AnalyticDBSparkHook._validate_list_of_stringables(jars):
data["jars"] = jars
if py_files and AnalyticDBSparkHook._validate_list_of_stringables(py_files):
data["pyFiles"] = py_files
if files and AnalyticDBSparkHook._validate_list_of_stringables(files):
data["files"] = files
if archives and AnalyticDBSparkHook._validate_list_of_stringables(archives):
data["archives"] = archives
if name:
data["name"] = name
return data
@staticmethod
def build_submit_sql_data(
sql: str | None = None,
conf: dict[Any, Any] | None = None,
driver_resource_spec: str | None = None,
executor_resource_spec: str | None = None,
num_executors: int | str | None = None,
name: str | None = None,
) -> str:
"""
Build the submit spark sql request data.
:param sql: The SQL query to execute. (templated)
:param conf: Spark configuration properties.
:param driver_resource_spec: The resource specifications of the Spark driver.
:param executor_resource_spec: The resource specifications of each Spark executor.
:param num_executors: number of executors to launch for this application.
:param name: name of this application.
"""
if sql is None:
raise ValueError("Parameter sql is need when submit spark sql.")
extra_conf: dict[str, str] = {}
formatted_conf = ""
if driver_resource_spec:
extra_conf["spark.driver.resourceSpec"] = driver_resource_spec
if executor_resource_spec:
extra_conf["spark.executor.resourceSpec"] = executor_resource_spec
if num_executors:
extra_conf["spark.executor.instances"] = str(num_executors)
if name:
extra_conf["spark.app.name"] = name
if conf and AnalyticDBSparkHook._validate_extra_conf(conf):
extra_conf.update(conf)
for key, value in extra_conf.items():
formatted_conf += f"set {key} = {value};"
return (formatted_conf + sql).strip()
@staticmethod
def _validate_list_of_stringables(vals: Sequence[str | int | float]) -> bool:
"""
Check the values in the provided list can be converted to strings.
:param vals: list to validate
"""
if (
vals is None
or not isinstance(vals, (tuple, list))
or any(1 for val in vals if not isinstance(val, (str, int, float)))
):
raise ValueError("List of strings expected")
return True
@staticmethod
def _validate_extra_conf(conf: dict[Any, Any]) -> bool:
"""
Check configuration values are either strings or ints.
:param conf: configuration variable
"""
if conf:
if not isinstance(conf, dict):
raise ValueError("'conf' argument must be a dict")
if any(True for k, v in conf.items() if not (v and isinstance(v, str) or isinstance(v, int))):
raise ValueError("'conf' values must be either strings or ints")
return True
def get_adb_spark_client(self) -> Client:
"""Get valid AnalyticDB MySQL Spark client."""
assert self.region is not None
extra_config = self.adb_spark_conn.extra_dejson
auth_type = extra_config.get("auth_type", None)
if not auth_type:
raise ValueError("No auth_type specified in extra_config.")
if auth_type != "AK":
raise ValueError(f"Unsupported auth_type: {auth_type}")
adb_spark_access_key_id = extra_config.get("access_key_id", None)
adb_spark_access_secret = extra_config.get("access_key_secret", None)
if not adb_spark_access_key_id:
raise ValueError(f"No access_key_id is specified for connection: {self.adb_spark_conn_id}")
if not adb_spark_access_secret:
raise ValueError(f"No access_key_secret is specified for connection: {self.adb_spark_conn_id}")
return Client(
Config(
access_key_id=adb_spark_access_key_id,
access_key_secret=adb_spark_access_secret,
endpoint=f"adb.{self.region}.aliyuncs.com",
)
)
def get_default_region(self) -> str | None:
"""Get default region from connection."""
extra_config = self.adb_spark_conn.extra_dejson
auth_type = extra_config.get("auth_type", None)
if not auth_type:
raise ValueError("No auth_type specified in extra_config. ")
if auth_type != "AK":
raise ValueError(f"Unsupported auth_type: {auth_type}")
default_region = extra_config.get("region", None)
if not default_region:
raise ValueError(f"No region is specified for connection: {self.adb_spark_conn}")
return default_region
| 14,367 | 37.937669 | 107 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/log/oss_task_handler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import os
import pathlib
import shutil
from functools import cached_property
from packaging.version import Version
from airflow.configuration import conf
from airflow.providers.alibaba.cloud.hooks.oss import OSSHook
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
def get_default_delete_local_copy():
"""Load delete_local_logs conf if Airflow version > 2.6 and return False if not.
TODO: delete this function when min airflow version >= 2.6
"""
from airflow.version import version
if Version(version) < Version("2.6"):
return False
return conf.getboolean("logging", "delete_local_logs")
class OSSTaskHandler(FileTaskHandler, LoggingMixin):
"""
OSSTaskHandler is a python log handler that handles and reads task instance logs.
Extends airflow FileTaskHandler and uploads to and reads from OSS remote storage.
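    A minimal construction sketch (the local folder, bucket and prefix below are assumptions;
    in practice the handler is wired up through the Airflow remote-logging configuration):
    .. code-block:: python
        handler = OSSTaskHandler(
            base_log_folder="/opt/airflow/logs",
            oss_log_folder="oss://my-bucket/airflow/logs",
        )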
"""
def __init__(self, base_log_folder, oss_log_folder, filename_template=None, **kwargs):
self.log.info("Using oss_task_handler for remote logging...")
super().__init__(base_log_folder, filename_template)
(self.bucket_name, self.base_folder) = OSSHook.parse_oss_url(oss_log_folder)
self.log_relative_path = ""
self._hook = None
self.closed = False
self.upload_on_close = True
self.delete_local_copy = (
kwargs["delete_local_copy"] if "delete_local_copy" in kwargs else get_default_delete_local_copy()
)
@cached_property
def hook(self):
remote_conn_id = conf.get("logging", "REMOTE_LOG_CONN_ID")
self.log.info("remote_conn_id: %s", remote_conn_id)
try:
return OSSHook(oss_conn_id=remote_conn_id)
except Exception as e:
self.log.exception(e)
self.log.error(
'Could not create an OSSHook with connection id "%s". '
"Please make sure that airflow[oss] is installed and "
"the OSS connection exists.",
remote_conn_id,
)
def set_context(self, ti):
"""This function is used to set the context of the handler."""
super().set_context(ti)
# Local location and remote location is needed to open and
# upload local log file to OSS remote storage.
self.log_relative_path = self._render_filename(ti, ti.try_number)
self.upload_on_close = not ti.raw
# Clear the file first so that duplicate data is not uploaded
# when re-using the same path (e.g. with rescheduled sensors)
if self.upload_on_close:
with open(self.handler.baseFilename, "w"):
pass
def close(self):
"""Close and upload local log file to remote storage OSS."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = self.log_relative_path
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
log = pathlib.Path(local_loc).read_text()
oss_write = self.oss_write(log, remote_loc)
if oss_write and self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True
def _read(self, ti, try_number, metadata=None):
"""
Read logs of given task instance and try_number from OSS remote storage.
If failed, read the log from task instance host machine.
:param ti: task instance object
:param try_number: task instance try_number to read logs from
:param metadata: log metadata,
can be used for steaming log reading and auto-tailing.
"""
# Explicitly getting log relative path is necessary as the given
# task instance might be different from task instance passed in
# set_context method.
log_relative_path = self._render_filename(ti, try_number)
remote_loc = log_relative_path
if not self.oss_log_exists(remote_loc):
return super()._read(ti, try_number, metadata)
# If OSS remote file exists, we do not fetch logs from task instance
# local machine even if there are errors reading remote logs, as
# returned remote_log will contain error messages.
remote_log = self.oss_read(remote_loc, return_error=True)
log = f"*** Reading remote log from {remote_loc}.\n{remote_log}\n"
return log, {"end_of_log": True}
def oss_log_exists(self, remote_log_location):
"""
Check if remote_log_location exists in remote storage.
:param remote_log_location: log's location in remote storage
:return: True if location exists else False
"""
oss_remote_log_location = f"{self.base_folder}/{remote_log_location}"
with contextlib.suppress(Exception):
return self.hook.key_exist(self.bucket_name, oss_remote_log_location)
return False
def oss_read(self, remote_log_location, return_error=False):
"""
        Return the log at the remote_log_location, or an error message (or None) if reading fails.
        :param remote_log_location: the log's location in remote storage
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise, returns None when an error occurs.
"""
try:
oss_remote_log_location = f"{self.base_folder}/{remote_log_location}"
self.log.info("read remote log: %s", oss_remote_log_location)
return self.hook.read_key(self.bucket_name, oss_remote_log_location)
except Exception:
msg = f"Could not read logs from {oss_remote_log_location}"
self.log.exception(msg)
# return error if needed
if return_error:
return msg
def oss_write(self, log, remote_log_location, append=True) -> bool:
"""
Write the log to remote_log_location and return `True`; fails silently and returns `False` on error.
:param log: the log to write to the remote_log_location
:param remote_log_location: the log's location in remote storage
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:return: whether the log is successfully written to remote location or not.
"""
oss_remote_log_location = f"{self.base_folder}/{remote_log_location}"
pos = 0
if append and self.oss_log_exists(oss_remote_log_location):
head = self.hook.head_key(self.bucket_name, oss_remote_log_location)
pos = head.content_length
self.log.info("log write pos is: %s", str(pos))
try:
self.log.info("writing remote log: %s", oss_remote_log_location)
self.hook.append_string(self.bucket_name, log, oss_remote_log_location, pos)
except Exception:
self.log.exception(
"Could not write logs to %s, log write pos is: %s, Append is %s",
oss_remote_log_location,
str(pos),
str(append),
)
return False
return True
| 8,577 | 40.843902 | 109 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/log/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/sensors/oss_key.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from urllib.parse import urlsplit
from airflow.exceptions import AirflowException
from airflow.providers.alibaba.cloud.hooks.oss import OSSHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class OSSKeySensor(BaseSensorOperator):
"""
Waits for a key (a file-like instance on OSS) to be present in an OSS bucket.
    OSS being a key/value store, it does not support folders. The path is just a key resource.
:param bucket_key: The key being waited on. Supports full oss:// style url
or relative path from root level. When it's specified as a full oss://
url, please leave bucket_name as `None`.
:param region: OSS region
:param bucket_name: OSS bucket name
:param oss_conn_id: The Airflow connection used for OSS credentials.
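    A minimal usage sketch (the bucket, key and region below are assumptions):
    .. code-block:: python
        wait_for_key = OSSKeySensor(
            task_id="wait_for_key",
            bucket_key="oss://my-bucket/data/input.csv",
            region="cn-hangzhou",
        )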
"""
template_fields: Sequence[str] = ("bucket_key", "bucket_name")
def __init__(
self,
bucket_key: str,
region: str,
bucket_name: str | None = None,
oss_conn_id: str | None = "oss_default",
**kwargs,
):
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.region = region
self.oss_conn_id = oss_conn_id
self.hook: OSSHook | None = None
def poke(self, context: Context):
"""
Check if the object exists in the bucket to pull key.
:param self: the object itself
:param context: the context of the object
:returns: True if the object exists, False otherwise
"""
if self.bucket_name is None:
parsed_url = urlsplit(self.bucket_key)
if parsed_url.netloc == "":
raise AirflowException("If key is a relative path from root, please provide a bucket_name")
self.bucket_name = parsed_url.netloc
self.bucket_key = parsed_url.path.lstrip("/")
else:
parsed_url = urlsplit(self.bucket_key)
if parsed_url.scheme != "" or parsed_url.netloc != "":
raise AirflowException(
"If bucket_name is provided, bucket_key"
" should be relative path from root"
" level, rather than a full oss:// url"
)
self.log.info("Poking for key : oss://%s/%s", self.bucket_name, self.bucket_key)
return self.get_hook.object_exists(key=self.bucket_key, bucket_name=self.bucket_name)
@cached_property
def get_hook(self) -> OSSHook:
"""Create and return an OSSHook."""
if self.hook:
return self.hook
self.hook = OSSHook(oss_conn_id=self.oss_conn_id, region=self.region)
return self.hook
| 3,656 | 36.316327 | 107 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/alibaba/cloud/sensors/analyticdb_spark.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.alibaba.cloud.hooks.analyticdb_spark import AnalyticDBSparkHook, AppState
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class AnalyticDBSparkSensor(BaseSensorOperator):
"""
Monitor a AnalyticDB Spark session for termination.
    :param app_id: identifier of the monitored Spark application.
:param adb_spark_conn_id: reference to a pre-defined ADB Spark connection.
    :param region: AnalyticDB MySQL region where the Spark application is submitted.
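    A minimal usage sketch (the application id, upstream task id and region below are assumptions):
    .. code-block:: python
        wait_for_app = AnalyticDBSparkSensor(
            task_id="wait_for_spark_app",
            app_id="{{ task_instance.xcom_pull(task_ids='submit_spark_app') }}",
            region="cn-hangzhou",
        )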
"""
template_fields: Sequence[str] = ("app_id",)
def __init__(
self,
*,
app_id: str,
adb_spark_conn_id: str = "adb_spark_default",
region: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.app_id = app_id
self._region = region
self._adb_spark_conn_id = adb_spark_conn_id
self._adb_spark_hook: AnalyticDBSparkHook | None = None
@cached_property
def get_hook(self) -> AnalyticDBSparkHook:
"""Get valid hook."""
if self._adb_spark_hook is None or not isinstance(self._adb_spark_hook, AnalyticDBSparkHook):
self._adb_spark_hook = AnalyticDBSparkHook(
adb_spark_conn_id=self._adb_spark_conn_id, region=self._region
)
return self._adb_spark_hook
def poke(self, context: Context) -> bool:
app_id = self.app_id
state = self.get_hook.get_spark_state(app_id)
return AppState(state) in AnalyticDBSparkHook.TERMINAL_STATES
| 2,543 | 35.869565 | 101 | py |
airflow | airflow-main/airflow/providers/http/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.5.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-http:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/http/operators/http.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import pickle
from typing import TYPE_CHECKING, Any, Callable, Sequence
from requests import Response
from requests.auth import AuthBase
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.http.hooks.http import HttpHook
from airflow.providers.http.triggers.http import HttpTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class SimpleHttpOperator(BaseOperator):
"""
Calls an endpoint on an HTTP system to execute an action.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SimpleHttpOperator`
:param http_conn_id: The :ref:`http connection<howto/connection:http>` to run
the operator against
:param endpoint: The relative part of the full url. (templated)
:param method: The HTTP method to use, default = "POST"
:param data: The data to pass. POST-data in POST/PUT and params
in the URL for a GET request. (templated)
:param headers: The HTTP headers to be added to the GET request
:param response_check: A check against the 'requests' response object.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
It should return True for 'pass' and False otherwise.
:param response_filter: A function allowing you to manipulate the response
        text, e.g. ``response_filter=lambda response: json.loads(response.text)``.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:param log_response: Log the response (default: False)
:param auth_type: The auth type for the service
:param tcp_keep_alive: Enable TCP Keep Alive for the connection.
:param tcp_keep_alive_idle: The TCP Keep Alive Idle parameter (corresponds to ``socket.TCP_KEEPIDLE``).
:param tcp_keep_alive_count: The TCP Keep Alive count parameter (corresponds to ``socket.TCP_KEEPCNT``)
:param tcp_keep_alive_interval: The TCP Keep Alive interval parameter (corresponds to
``socket.TCP_KEEPINTVL``)
:param deferrable: Run operator in the deferrable mode
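    A minimal usage sketch (the connection id, endpoint and response check below are assumptions):
    .. code-block:: python
        check_api = SimpleHttpOperator(
            task_id="check_api",
            http_conn_id="http_default",
            method="GET",
            endpoint="health",
            response_check=lambda response: response.status_code == 200,
        )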
"""
template_fields: Sequence[str] = (
"endpoint",
"data",
"headers",
)
template_fields_renderers = {"headers": "json", "data": "py"}
template_ext: Sequence[str] = ()
ui_color = "#f4a460"
def __init__(
self,
*,
endpoint: str | None = None,
method: str = "POST",
data: Any = None,
headers: dict[str, str] | None = None,
response_check: Callable[..., bool] | None = None,
response_filter: Callable[..., Any] | None = None,
extra_options: dict[str, Any] | None = None,
http_conn_id: str = "http_default",
log_response: bool = False,
auth_type: type[AuthBase] | None = None,
tcp_keep_alive: bool = True,
tcp_keep_alive_idle: int = 120,
tcp_keep_alive_count: int = 20,
tcp_keep_alive_interval: int = 30,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.http_conn_id = http_conn_id
self.method = method
self.endpoint = endpoint
self.headers = headers or {}
self.data = data or {}
self.response_check = response_check
self.response_filter = response_filter
self.extra_options = extra_options or {}
self.log_response = log_response
self.auth_type = auth_type
self.tcp_keep_alive = tcp_keep_alive
self.tcp_keep_alive_idle = tcp_keep_alive_idle
self.tcp_keep_alive_count = tcp_keep_alive_count
self.tcp_keep_alive_interval = tcp_keep_alive_interval
self.deferrable = deferrable
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=HttpTrigger(
http_conn_id=self.http_conn_id,
auth_type=self.auth_type,
method=self.method,
endpoint=self.endpoint,
headers=self.headers,
data=self.data,
extra_options=self.extra_options,
),
method_name="execute_complete",
)
else:
http = HttpHook(
self.method,
http_conn_id=self.http_conn_id,
auth_type=self.auth_type,
tcp_keep_alive=self.tcp_keep_alive,
tcp_keep_alive_idle=self.tcp_keep_alive_idle,
tcp_keep_alive_count=self.tcp_keep_alive_count,
tcp_keep_alive_interval=self.tcp_keep_alive_interval,
)
self.log.info("Calling HTTP method")
response = http.run(self.endpoint, self.data, self.headers, self.extra_options)
return self.process_response(context=context, response=response)
def process_response(self, context: Context, response: Response) -> str:
"""Process the response."""
from airflow.utils.operator_helpers import determine_kwargs
if self.log_response:
self.log.info(response.text)
if self.response_check:
kwargs = determine_kwargs(self.response_check, [response], context)
if not self.response_check(response, **kwargs):
raise AirflowException("Response check returned False.")
if self.response_filter:
kwargs = determine_kwargs(self.response_filter, [response], context)
return self.response_filter(response, **kwargs)
return response.text
def execute_complete(self, context: Context, event: dict):
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "success":
response = pickle.loads(base64.standard_b64decode(event["response"]))
return self.process_response(context=context, response=response)
else:
raise AirflowException(f"Unexpected error in the operation: {event['message']}")
| 7,470 | 41.936782 | 107 | py |
airflow | airflow-main/airflow/providers/http/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/http/triggers/http.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import pickle
from typing import Any, AsyncIterator
import requests
from aiohttp.client_reqrep import ClientResponse
from requests.cookies import RequestsCookieJar
from requests.structures import CaseInsensitiveDict
from airflow.providers.http.hooks.http import HttpAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class HttpTrigger(BaseTrigger):
"""
HttpTrigger run on the trigger worker.
:param http_conn_id: http connection id that has the base
API url i.e https://www.google.com/ and optional authentication credentials. Default
headers can also be specified in the Extra field in json format.
:param auth_type: The auth type for the service
:param method: the API method to be called
:param endpoint: Endpoint to be called, i.e. ``resource/v1/query?``.
:param headers: Additional headers to be passed through as a dict.
:param data: Payload to be uploaded or request parameters.
:param extra_options: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as
``aiohttp.ClientSession().get(json=obj)``.
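    A minimal sketch of how a deferrable operator can hand off to this trigger (the
    endpoint below is an assumption):
    .. code-block:: python
        self.defer(
            trigger=HttpTrigger(
                http_conn_id="http_default",
                method="GET",
                endpoint="health",
            ),
            method_name="execute_complete",
        )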
"""
def __init__(
self,
http_conn_id: str = "http_default",
auth_type: Any = None,
method: str = "POST",
endpoint: str | None = None,
headers: dict[str, str] | None = None,
data: Any = None,
extra_options: dict[str, Any] | None = None,
):
super().__init__()
self.http_conn_id = http_conn_id
self.method = method
self.auth_type = auth_type
self.endpoint = endpoint
self.headers = headers
self.data = data
self.extra_options = extra_options
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes HttpTrigger arguments and classpath."""
return (
"airflow.providers.http.triggers.http.HttpTrigger",
{
"http_conn_id": self.http_conn_id,
"method": self.method,
"auth_type": self.auth_type,
"endpoint": self.endpoint,
"headers": self.headers,
"data": self.data,
"extra_options": self.extra_options,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Makes a series of asynchronous http calls via an http hook."""
hook = HttpAsyncHook(
method=self.method,
http_conn_id=self.http_conn_id,
auth_type=self.auth_type,
)
try:
client_response = await hook.run(
endpoint=self.endpoint,
data=self.data,
headers=self.headers,
extra_options=self.extra_options,
)
response = await self._convert_response(client_response)
yield TriggerEvent(
{
"status": "success",
"response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"),
}
)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
# yield TriggerEvent({"status": "error", "message": str(traceback.format_exc())})
@staticmethod
async def _convert_response(client_response: ClientResponse) -> requests.Response:
"""Convert aiohttp.client_reqrep.ClientResponse to requests.Response."""
response = requests.Response()
response._content = await client_response.read()
response.status_code = client_response.status
response.headers = CaseInsensitiveDict(client_response.headers)
response.url = str(client_response.url)
response.history = [await HttpTrigger._convert_response(h) for h in client_response.history]
response.encoding = client_response.get_encoding()
response.reason = str(client_response.reason)
cookies = RequestsCookieJar()
for (k, v) in client_response.cookies.items():
cookies.set(k, v)
response.cookies = cookies
return response
| 4,988 | 38.912 | 100 | py |
airflow | airflow-main/airflow/providers/http/triggers/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/http/hooks/http.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, Callable
import aiohttp
import requests
import tenacity
from aiohttp import ClientResponseError
from asgiref.sync import sync_to_async
from requests.auth import HTTPBasicAuth
from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
if TYPE_CHECKING:
from aiohttp.client_reqrep import ClientResponse
class HttpHook(BaseHook):
"""Interact with HTTP servers.
:param method: the API method to be called
:param http_conn_id: :ref:`http connection<howto/connection:http>` that has the base
API url i.e https://www.google.com/ and optional authentication credentials. Default
headers can also be specified in the Extra field in json format.
:param auth_type: The auth type for the service
:param tcp_keep_alive: Enable TCP Keep Alive for the connection.
:param tcp_keep_alive_idle: The TCP Keep Alive Idle parameter (corresponds to ``socket.TCP_KEEPIDLE``).
:param tcp_keep_alive_count: The TCP Keep Alive count parameter (corresponds to ``socket.TCP_KEEPCNT``)
:param tcp_keep_alive_interval: The TCP Keep Alive interval parameter (corresponds to
``socket.TCP_KEEPINTVL``)
:param auth_args: extra arguments used to initialize the auth_type if different than default HTTPBasicAuth
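    A minimal usage sketch (the connection id, endpoint and header below are assumptions):
    .. code-block:: python
        hook = HttpHook(method="GET", http_conn_id="http_default")
        response = hook.run(endpoint="health", headers={"Accept": "application/json"})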
"""
conn_name_attr = "http_conn_id"
default_conn_name = "http_default"
conn_type = "http"
hook_name = "HTTP"
def __init__(
self,
method: str = "POST",
http_conn_id: str = default_conn_name,
auth_type: Any = None,
tcp_keep_alive: bool = True,
tcp_keep_alive_idle: int = 120,
tcp_keep_alive_count: int = 20,
tcp_keep_alive_interval: int = 30,
) -> None:
super().__init__()
self.http_conn_id = http_conn_id
self.method = method.upper()
self.base_url: str = ""
self._retry_obj: Callable[..., Any]
self._auth_type: Any = auth_type
self.tcp_keep_alive = tcp_keep_alive
self.keep_alive_idle = tcp_keep_alive_idle
self.keep_alive_count = tcp_keep_alive_count
self.keep_alive_interval = tcp_keep_alive_interval
@property
def auth_type(self):
return self._auth_type or HTTPBasicAuth
@auth_type.setter
def auth_type(self, v):
self._auth_type = v
# headers may be passed through directly or in the "extra" field in the connection
# definition
def get_conn(self, headers: dict[Any, Any] | None = None) -> requests.Session:
"""Create a Requests HTTP session.
:param headers: additional headers to be passed through as a dictionary
"""
session = requests.Session()
if self.http_conn_id:
conn = self.get_connection(self.http_conn_id)
if conn.host and "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
host = conn.host if conn.host else ""
self.base_url = schema + "://" + host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port)
if conn.login:
session.auth = self.auth_type(conn.login, conn.password)
elif self._auth_type:
session.auth = self.auth_type()
if conn.extra:
try:
session.headers.update(conn.extra_dejson)
except TypeError:
self.log.warning("Connection to %s has invalid extra field.", conn.host)
if headers:
session.headers.update(headers)
return session
def run(
self,
endpoint: str | None = None,
data: dict[str, Any] | str | None = None,
headers: dict[str, Any] | None = None,
extra_options: dict[str, Any] | None = None,
**request_kwargs: Any,
) -> Any:
r"""Perform the request.
:param endpoint: the endpoint to be called i.e. resource/v1/query?
:param data: payload to be uploaded or request parameters
:param headers: additional headers to be passed through as a dictionary
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non
2XX or 3XX status codes
:param request_kwargs: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as ``requests.Request(json=obj)``
"""
extra_options = extra_options or {}
session = self.get_conn(headers)
url = self.url_from_endpoint(endpoint)
if self.tcp_keep_alive:
keep_alive_adapter = TCPKeepAliveAdapter(
idle=self.keep_alive_idle, count=self.keep_alive_count, interval=self.keep_alive_interval
)
session.mount(url, keep_alive_adapter)
if self.method == "GET":
# GET uses params
req = requests.Request(self.method, url, params=data, headers=headers, **request_kwargs)
elif self.method == "HEAD":
# HEAD doesn't use params
req = requests.Request(self.method, url, headers=headers, **request_kwargs)
else:
# Others use data
req = requests.Request(self.method, url, data=data, headers=headers, **request_kwargs)
prepped_request = session.prepare_request(req)
self.log.debug("Sending '%s' to url: %s", self.method, url)
return self.run_and_check(session, prepped_request, extra_options)
def check_response(self, response: requests.Response) -> None:
"""Check the status code and raise on failure.
:param response: A requests response object.
:raise AirflowException: If the response contains a status code not
in the 2xx and 3xx range.
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
self.log.error("HTTP error: %s", response.reason)
self.log.error(response.text)
raise AirflowException(str(response.status_code) + ":" + response.reason)
def run_and_check(
self,
session: requests.Session,
prepped_request: requests.PreparedRequest,
extra_options: dict[Any, Any],
) -> Any:
"""Grab extra options, actually run the request, and check the result.
:param session: the session to be used to execute the request
:param prepped_request: the prepared request generated in run()
:param extra_options: additional options to be used when executing the request
i.e. ``{'check_response': False}`` to avoid checking raising exceptions on non 2XX
or 3XX status codes
"""
extra_options = extra_options or {}
settings = session.merge_environment_settings(
prepped_request.url,
proxies=extra_options.get("proxies", {}),
stream=extra_options.get("stream", False),
verify=extra_options.get("verify"),
cert=extra_options.get("cert"),
)
# Send the request.
send_kwargs: dict[str, Any] = {
"timeout": extra_options.get("timeout"),
"allow_redirects": extra_options.get("allow_redirects", True),
}
send_kwargs.update(settings)
try:
response = session.send(prepped_request, **send_kwargs)
if extra_options.get("check_response", True):
self.check_response(response)
return response
except requests.exceptions.ConnectionError as ex:
self.log.warning("%s Tenacity will retry to execute the operation", ex)
raise ex
def run_with_advanced_retry(self, _retry_args: dict[Any, Any], *args: Any, **kwargs: Any) -> Any:
"""Run the hook with retry.
This is useful for connectors which might be disturbed by intermittent
issues and should not instantly fail.
:param _retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
.. code-block:: python
hook = HttpHook(http_conn_id="my_conn", method="GET")
retry_args = dict(
wait=tenacity.wait_exponential(),
stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(Exception),
)
hook.run_with_advanced_retry(endpoint="v1/test", _retry_args=retry_args)
"""
self._retry_obj = tenacity.Retrying(**_retry_args)
return self._retry_obj(self.run, *args, **kwargs)
def url_from_endpoint(self, endpoint: str | None) -> str:
"""Combine base url with endpoint."""
if self.base_url and not self.base_url.endswith("/") and endpoint and not endpoint.startswith("/"):
return self.base_url + "/" + endpoint
return (self.base_url or "") + (endpoint or "")
def test_connection(self):
"""Test HTTP Connection."""
try:
self.run()
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
class HttpAsyncHook(BaseHook):
"""Interact with HTTP servers asynchronously.
:param method: the API method to be called
:param http_conn_id: http connection id that has the base
API url i.e https://www.google.com/ and optional authentication credentials. Default
headers can also be specified in the Extra field in json format.
:param auth_type: The auth type for the service
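    A minimal usage sketch (the endpoint below is an assumption):
    .. code-block:: python
        import asyncio
        hook = HttpAsyncHook(method="GET", http_conn_id="http_default")
        response = asyncio.run(hook.run(endpoint="health"))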
"""
conn_name_attr = "http_conn_id"
default_conn_name = "http_default"
conn_type = "http"
hook_name = "HTTP"
def __init__(
self,
method: str = "POST",
http_conn_id: str = default_conn_name,
auth_type: Any = aiohttp.BasicAuth,
retry_limit: int = 3,
retry_delay: float = 1.0,
) -> None:
self.http_conn_id = http_conn_id
self.method = method.upper()
self.base_url: str = ""
self._retry_obj: Callable[..., Any]
self.auth_type: Any = auth_type
if retry_limit < 1:
raise ValueError("Retry limit must be greater than equal to 1")
self.retry_limit = retry_limit
self.retry_delay = retry_delay
async def run(
self,
endpoint: str | None = None,
data: dict[str, Any] | str | None = None,
headers: dict[str, Any] | None = None,
extra_options: dict[str, Any] | None = None,
) -> ClientResponse:
"""Perform an asynchronous HTTP request call.
:param endpoint: Endpoint to be called, i.e. ``resource/v1/query?``.
:param data: Payload to be uploaded or request parameters.
:param headers: Additional headers to be passed through as a dict.
:param extra_options: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as
``aiohttp.ClientSession().get(json=obj)``.
"""
extra_options = extra_options or {}
# headers may be passed through directly or in the "extra" field in the connection
# definition
_headers = {}
auth = None
if self.http_conn_id:
conn = await sync_to_async(self.get_connection)(self.http_conn_id)
if conn.host and "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
host = conn.host if conn.host else ""
self.base_url = schema + "://" + host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port)
if conn.login:
auth = self.auth_type(conn.login, conn.password)
if conn.extra:
try:
_headers.update(conn.extra_dejson)
except TypeError:
self.log.warning("Connection to %s has invalid extra field.", conn.host)
if headers:
_headers.update(headers)
if self.base_url and not self.base_url.endswith("/") and endpoint and not endpoint.startswith("/"):
url = self.base_url + "/" + endpoint
else:
url = (self.base_url or "") + (endpoint or "")
async with aiohttp.ClientSession() as session:
if self.method == "GET":
request_func = session.get
elif self.method == "POST":
request_func = session.post
elif self.method == "PATCH":
request_func = session.patch
elif self.method == "HEAD":
request_func = session.head
elif self.method == "PUT":
request_func = session.put
elif self.method == "DELETE":
request_func = session.delete
elif self.method == "OPTIONS":
request_func = session.options
else:
raise AirflowException(f"Unexpected HTTP Method: {self.method}")
attempt_num = 1
while True:
response = await request_func(
url,
json=data if self.method in ("POST", "PATCH") else None,
params=data if self.method == "GET" else None,
headers=_headers,
auth=auth,
**extra_options,
)
try:
response.raise_for_status()
return response
except ClientResponseError as e:
self.log.warning(
"[Try %d of %d] Request to %s failed.",
attempt_num,
self.retry_limit,
url,
)
if not self._retryable_error_async(e) or attempt_num == self.retry_limit:
self.log.exception("HTTP error with status: %s", e.status)
# In this case, the user probably made a mistake.
# Don't retry.
raise AirflowException(f"{e.status}:{e.message}")
attempt_num += 1
await asyncio.sleep(self.retry_delay)
def _retryable_error_async(self, exception: ClientResponseError) -> bool:
"""Determine whether an exception may successful on a subsequent attempt.
It considers the following to be retryable:
- requests_exceptions.ConnectionError
- requests_exceptions.Timeout
- anything with a status code >= 500
Most retryable errors are covered by status code >= 500.
"""
if exception.status == 429:
# don't retry for too Many Requests
return False
if exception.status == 413:
# don't retry for payload Too Large
return False
return exception.status >= 500
| 16,172 | 38.159806 | 110 | py |
airflow | airflow-main/airflow/providers/http/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/http/sensors/http.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class HttpSensor(BaseSensorOperator):
"""
    Execute an HTTP GET request and return False on failure: 404 Not Found or `response_check` returning False.
HTTP Error codes other than 404 (like 403) or Connection Refused Error
would raise an exception and fail the sensor itself directly (no more poking).
    To avoid failing the task for codes other than 404, the argument ``extra_options``
    can be passed with the value ``{'check_response': False}``. It will make the ``response_check``
    be executed for any HTTP status code, as sketched below.
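    A minimal sketch of that ``extra_options`` usage (the connection id and endpoint are assumptions):
    .. code-block:: python
        HttpSensor(
            task_id="wait_for_endpoint",
            http_conn_id="http_default",
            endpoint="status",
            extra_options={"check_response": False},
            response_check=lambda response: response.status_code == 200,
            poke_interval=60,
        )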
    The response check can access the template context of the operator:
.. code-block:: python
def response_check(response, task_instance):
# The task_instance is injected, so you can pull data form xcom
# Other context variables such as dag, ds, execution_date are also available.
xcom_data = task_instance.xcom_pull(task_ids="pushing_task")
            # In practice you would do something more sensible with this data.
print(xcom_data)
return True
HttpSensor(task_id="my_http_sensor", ..., response_check=response_check)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:HttpSensor`
:param http_conn_id: The :ref:`http connection<howto/connection:http>` to run the
sensor against
:param method: The HTTP request method to use
:param endpoint: The relative part of the full url
:param request_params: The parameters to be added to the GET url
:param headers: The HTTP headers to be added to the GET request
:param response_check: A check against the 'requests' response object.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
It should return True for 'pass' and False otherwise.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:param tcp_keep_alive: Enable TCP Keep Alive for the connection.
:param tcp_keep_alive_idle: The TCP Keep Alive Idle parameter (corresponds to ``socket.TCP_KEEPIDLE``).
:param tcp_keep_alive_count: The TCP Keep Alive count parameter (corresponds to ``socket.TCP_KEEPCNT``)
:param tcp_keep_alive_interval: The TCP Keep Alive interval parameter (corresponds to
``socket.TCP_KEEPINTVL``)
"""
template_fields: Sequence[str] = ("endpoint", "request_params", "headers")
def __init__(
self,
*,
endpoint: str,
http_conn_id: str = "http_default",
method: str = "GET",
request_params: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
response_check: Callable[..., bool] | None = None,
extra_options: dict[str, Any] | None = None,
tcp_keep_alive: bool = True,
tcp_keep_alive_idle: int = 120,
tcp_keep_alive_count: int = 20,
tcp_keep_alive_interval: int = 30,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.method = method
self.request_params = request_params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.tcp_keep_alive = tcp_keep_alive
self.tcp_keep_alive_idle = tcp_keep_alive_idle
self.tcp_keep_alive_count = tcp_keep_alive_count
self.tcp_keep_alive_interval = tcp_keep_alive_interval
def poke(self, context: Context) -> bool:
from airflow.utils.operator_helpers import determine_kwargs
hook = HttpHook(
method=self.method,
http_conn_id=self.http_conn_id,
tcp_keep_alive=self.tcp_keep_alive,
tcp_keep_alive_idle=self.tcp_keep_alive_idle,
tcp_keep_alive_count=self.tcp_keep_alive_count,
tcp_keep_alive_interval=self.tcp_keep_alive_interval,
)
self.log.info("Poking: %s", self.endpoint)
try:
response = hook.run(
self.endpoint,
data=self.request_params,
headers=self.headers,
extra_options=self.extra_options,
)
if self.response_check:
kwargs = determine_kwargs(self.response_check, [response], context)
return self.response_check(response, **kwargs)
except AirflowException as exc:
if str(exc).startswith("404"):
return False
raise exc
return True
| 5,864 | 41.194245 | 107 | py |
airflow | airflow-main/airflow/providers/http/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "8.3.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-amazon:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,531 | 35.47619 | 115 | py |
airflow | airflow-main/airflow/providers/amazon/aws/exceptions.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.exceptions import AirflowException
# Note: Any AirflowException raised is expected to cause the TaskInstance
# to be marked in an ERROR state
class EcsTaskFailToStart(Exception):
"""Raise when ECS tasks fail to start AFTER processing the request."""
def __init__(self, message: str):
self.message = message
super().__init__(message)
def __reduce__(self):
        # __reduce__ must return the callable plus a tuple of constructor arguments.
        return EcsTaskFailToStart, (self.message,)
class EcsOperatorError(Exception):
"""Raise when ECS cannot handle the request."""
def __init__(self, failures: list, message: str):
self.failures = failures
self.message = message
super().__init__(message)
def __reduce__(self):
return EcsOperatorError, (self.failures, self.message)
class S3HookUriParseFailure(AirflowException):
"""When parse_s3_url fails to parse URL, this error is thrown."""
| 1,731 | 32.960784 | 74 | py |
airflow | airflow-main/airflow/providers/amazon/aws/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/secrets/systems_manager.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to sourcing connections from AWS SSM Parameter Store."""
from __future__ import annotations
import re
from functools import cached_property
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class SystemsManagerParameterStoreBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection or Variables from AWS SSM Parameter Store.
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
backend_kwargs = {"connections_prefix": "/airflow/connections", "profile_name": null}
For example, if ssm path is ``/airflow/connections/smtp_default``, this would be accessible
if you provide ``{"connections_prefix": "/airflow/connections"}`` and request conn_id ``smtp_default``.
And if ssm path is ``/airflow/variables/hello``, this would be accessible
if you provide ``{"variables_prefix": "/airflow/variables"}`` and variable key ``hello``.
:param connections_prefix: Specifies the prefix of the secret to read to get Connections.
If set to None (null), requests for connections will not be sent to AWS SSM Parameter Store.
:param connections_lookup_pattern: Specifies a pattern the connection ID needs to match to be looked up in
AWS Parameter Store. Applies only if `connections_prefix` is not None.
If set to None (null value in the configuration), all connections will be looked up first in
AWS Parameter Store.
:param variables_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for variables will not be sent to AWS SSM Parameter Store.
:param variables_lookup_pattern: Specifies a pattern the variable key needs to match to be looked up in
AWS Parameter Store. Applies only if `variables_prefix` is not None.
If set to None (null value in the configuration), all variables will be looked up first in
AWS Parameter Store.
:param config_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for configurations will not be sent to AWS SSM Parameter Store.
:param config_lookup_pattern: Specifies a pattern the config key needs to match to be looked up in
AWS Parameter Store. Applies only if `config_prefix` is not None.
If set to None (null value in the configuration), all config keys will be looked up first in
AWS Parameter Store.
You can also pass additional keyword arguments listed in AWS Connection Extra config
to this class, and they would be used for establish connection and passed on to Boto3 client.
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
backend_kwargs = {"connections_prefix": "airflow/connections", "region_name": "eu-west-1"}
.. seealso::
:ref:`howto/connection:aws:configuring-the-connection`
"""
def __init__(
self,
connections_prefix: str = "/airflow/connections",
connections_lookup_pattern: str | None = None,
variables_prefix: str = "/airflow/variables",
variables_lookup_pattern: str | None = None,
config_prefix: str = "/airflow/config",
config_lookup_pattern: str | None = None,
**kwargs,
):
super().__init__()
if connections_prefix is not None:
self.connections_prefix = connections_prefix.rstrip("/")
else:
self.connections_prefix = connections_prefix
if variables_prefix is not None:
self.variables_prefix = variables_prefix.rstrip("/")
else:
self.variables_prefix = variables_prefix
if config_prefix is not None:
self.config_prefix = config_prefix.rstrip("/")
else:
self.config_prefix = config_prefix
self.connections_lookup_pattern = connections_lookup_pattern
self.variables_lookup_pattern = variables_lookup_pattern
self.config_lookup_pattern = config_lookup_pattern
self.profile_name = kwargs.get("profile_name", None)
# Remove client specific arguments from kwargs
self.api_version = kwargs.pop("api_version", None)
self.use_ssl = kwargs.pop("use_ssl", None)
self.kwargs = kwargs
@cached_property
def client(self):
"""Create a SSM client."""
from airflow.providers.amazon.aws.hooks.base_aws import SessionFactory
from airflow.providers.amazon.aws.utils.connection_wrapper import AwsConnectionWrapper
conn_id = f"{self.__class__.__name__}__connection"
conn_config = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=self.kwargs)
client_kwargs = trim_none_values(
{
"region_name": conn_config.region_name,
"verify": conn_config.verify,
"endpoint_url": conn_config.endpoint_url,
"api_version": self.api_version,
"use_ssl": self.use_ssl,
}
)
session = SessionFactory(conn=conn_config).create_session()
return session.client(service_name="ssm", **client_kwargs)
def get_conn_value(self, conn_id: str) -> str | None:
"""
Get param value.
:param conn_id: connection id
"""
if self.connections_prefix is None:
return None
return self._get_secret(self.connections_prefix, conn_id, self.connections_lookup_pattern)
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key, self.variables_lookup_pattern)
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key, self.config_lookup_pattern)
def _get_secret(self, path_prefix: str, secret_id: str, lookup_pattern: str | None) -> str | None:
"""
Get secret value from Parameter Store.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Systems Manager
"""
if lookup_pattern and not re.match(lookup_pattern, secret_id, re.IGNORECASE):
return None
ssm_path = self.build_path(path_prefix, secret_id)
ssm_path = self._ensure_leading_slash(ssm_path)
try:
response = self.client.get_parameter(Name=ssm_path, WithDecryption=True)
return response["Parameter"]["Value"]
except self.client.exceptions.ParameterNotFound:
self.log.debug("Parameter %s not found.", ssm_path)
return None
def _ensure_leading_slash(self, ssm_path: str):
"""
        AWS Systems Manager mandates a leading "/" in parameter paths; add it to the SSM path dynamically if it is missing.
:param ssm_path: SSM parameter path
"""
if not ssm_path.startswith("/"):
ssm_path = f"/{ssm_path}"
return ssm_path
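# --- Hedged usage sketch (not part of the upstream module) ---
# A minimal illustration, under assumed settings, of how the backend resolves an
# SSM parameter path. The instantiation and the lookup pattern below are
# hypothetical; real deployments configure the backend via the [secrets] section
# of airflow.cfg rather than instantiating it directly.
if __name__ == "__main__":
    backend = SystemsManagerParameterStoreBackend(
        connections_prefix="/airflow/connections",
        connections_lookup_pattern=r"^aws_",  # only conn_ids starting with "aws_" are sent to SSM
    )
    # build_path (inherited from BaseSecretsBackend) joins prefix and secret id:
    # -> "/airflow/connections/aws_default"
    print(backend.build_path("/airflow/connections", "aws_default"))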
| 8,495 | 41.268657 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/secrets/secrets_manager.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to sourcing secrets from AWS Secrets Manager."""
from __future__ import annotations
import json
import re
import warnings
from functools import cached_property
from typing import Any
from urllib.parse import unquote
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class SecretsManagerBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connection or Variables from AWS Secrets Manager.
Configurable via ``airflow.cfg`` like so:
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend
backend_kwargs = {"connections_prefix": "airflow/connections"}
For example, when ``{"connections_prefix": "airflow/connections"}`` is set, if a secret is defined with
the path ``airflow/connections/smtp_default``, the connection with conn_id ``smtp_default`` would be
accessible.
When ``{"variables_prefix": "airflow/variables"}`` is set, if a secret is defined with
the path ``airflow/variables/hello``, the variable with the name ``hello`` would be accessible.
When ``{"config_prefix": "airflow/config"}`` set, if a secret is defined with
the path ``airflow/config/sql_alchemy_conn``, the config with they ``sql_alchemy_conn`` would be
accessible.
You can also pass additional keyword arguments listed in AWS Connection Extra config
to this class, and they would be used for establishing a connection and passed on to Boto3 client.
.. code-block:: ini
[secrets]
backend = airflow.providers.amazon.aws.secrets.secrets_manager.SecretsManagerBackend
backend_kwargs = {"connections_prefix": "airflow/connections", "region_name": "eu-west-1"}
.. seealso::
:ref:`howto/connection:aws:configuring-the-connection`
There are two ways of storing secrets in Secret Manager for using them with this operator:
storing them as a conn URI in one field, or taking advantage of native approach of Secrets Manager
and storing them in multiple fields. There are certain words that will be searched in the name
of fields for trying to retrieve a connection part. Those words are:
.. code-block:: python
possible_words_for_conn_fields = {
"login": ["login", "user", "username", "user_name"],
"password": ["password", "pass", "key"],
"host": ["host", "remote_host", "server"],
"port": ["port"],
"schema": ["database", "schema"],
"conn_type": ["conn_type", "conn_id", "connection_type", "engine"],
}
However, these lists can be extended using the configuration parameter ``extra_conn_words``. Also,
you can have a field named extra for extra parameters for the conn. Please note that this extra field
must be a valid JSON.
:param connections_prefix: Specifies the prefix of the secret to read to get Connections.
If set to None (null value in the configuration), requests for connections will not be
sent to AWS Secrets Manager. If you don't want a connections_prefix, set it as an empty string
:param connections_lookup_pattern: Specifies a pattern the connection ID needs to match to be looked up in
AWS Secrets Manager. Applies only if `connections_prefix` is not None.
If set to None (null value in the configuration), all connections will be looked up first in
AWS Secrets Manager.
:param variables_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null value in the configuration), requests for variables will not be sent to
AWS Secrets Manager. If you don't want a variables_prefix, set it as an empty string
:param variables_lookup_pattern: Specifies a pattern the variable key needs to match to be looked up in
AWS Secrets Manager. Applies only if `variables_prefix` is not None.
If set to None (null value in the configuration), all variables will be looked up first in
AWS Secrets Manager.
:param config_prefix: Specifies the prefix of the secret to read to get Configurations.
If set to None (null value in the configuration), requests for configurations will not be sent to
AWS Secrets Manager. If you don't want a config_prefix, set it as an empty string
:param config_lookup_pattern: Specifies a pattern the config key needs to match to be looked up in
AWS Secrets Manager. Applies only if `config_prefix` is not None.
If set to None (null value in the configuration), all config keys will be looked up first in
AWS Secrets Manager.
:param sep: separator used to concatenate secret_prefix and secret_id. Default: "/"
    :param extra_conn_words: used only when the secret is stored in separate fields in Secrets Manager
        rather than as a single connection URI. You can add more words for each connection
part beyond the default ones. The extra words to be searched should be passed as a dict of lists,
each list corresponding to a connection part. The optional keys of the dict must be: user,
password, host, schema, conn_type.
"""
def __init__(
self,
connections_prefix: str = "airflow/connections",
connections_lookup_pattern: str | None = None,
variables_prefix: str = "airflow/variables",
variables_lookup_pattern: str | None = None,
config_prefix: str = "airflow/config",
config_lookup_pattern: str | None = None,
sep: str = "/",
extra_conn_words: dict[str, list[str]] | None = None,
**kwargs,
):
super().__init__()
if connections_prefix:
self.connections_prefix = connections_prefix.rstrip(sep)
else:
self.connections_prefix = connections_prefix
if variables_prefix:
self.variables_prefix = variables_prefix.rstrip(sep)
else:
self.variables_prefix = variables_prefix
if config_prefix:
self.config_prefix = config_prefix.rstrip(sep)
else:
self.config_prefix = config_prefix
self.connections_lookup_pattern = connections_lookup_pattern
self.variables_lookup_pattern = variables_lookup_pattern
self.config_lookup_pattern = config_lookup_pattern
self.sep = sep
if kwargs.pop("full_url_mode", None) is not None:
warnings.warn(
"The `full_url_mode` kwarg is deprecated. Going forward, the `SecretsManagerBackend`"
" will support both URL-encoded and JSON-encoded secrets at the same time. The encoding"
" of the secret will be determined automatically.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if kwargs.get("are_secret_values_urlencoded") is not None:
warnings.warn(
"The `secret_values_are_urlencoded` is deprecated. This kwarg only exists to assist in"
" migrating away from URL-encoding secret values for JSON secrets."
" To remove this warning, make sure your JSON secrets are *NOT* URL-encoded, and then"
" remove this kwarg from backend_kwargs.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.are_secret_values_urlencoded = kwargs.pop("are_secret_values_urlencoded", None)
else:
self.are_secret_values_urlencoded = False
self.extra_conn_words = extra_conn_words or {}
self.profile_name = kwargs.get("profile_name", None)
# Remove client specific arguments from kwargs
self.api_version = kwargs.pop("api_version", None)
self.use_ssl = kwargs.pop("use_ssl", None)
self.kwargs = kwargs
@cached_property
def client(self):
"""Create a Secrets Manager client."""
from airflow.providers.amazon.aws.hooks.base_aws import SessionFactory
from airflow.providers.amazon.aws.utils.connection_wrapper import AwsConnectionWrapper
conn_id = f"{self.__class__.__name__}__connection"
conn_config = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=self.kwargs)
client_kwargs = trim_none_values(
{
"region_name": conn_config.region_name,
"verify": conn_config.verify,
"endpoint_url": conn_config.endpoint_url,
"api_version": self.api_version,
"use_ssl": self.use_ssl,
}
)
session = SessionFactory(conn=conn_config).create_session()
return session.client(service_name="secretsmanager", **client_kwargs)
def _standardize_secret_keys(self, secret: dict[str, Any]) -> dict[str, Any]:
"""Standardize the names of the keys in the dict. These keys align with."""
possible_words_for_conn_fields = {
"login": ["login", "user", "username", "user_name"],
"password": ["password", "pass", "key"],
"host": ["host", "remote_host", "server"],
"port": ["port"],
"schema": ["database", "schema"],
"conn_type": ["conn_type", "conn_id", "connection_type", "engine"],
"extra": ["extra"],
}
for conn_field, extra_words in self.extra_conn_words.items():
if conn_field == "user":
# Support `user` for backwards compatibility.
conn_field = "login"
possible_words_for_conn_fields[conn_field].extend(extra_words)
conn_d: dict[str, Any] = {}
for conn_field, possible_words in possible_words_for_conn_fields.items():
try:
conn_d[conn_field] = [v for k, v in secret.items() if k in possible_words][0]
except IndexError:
conn_d[conn_field] = None
return conn_d
def _remove_escaping_in_secret_dict(self, secret: dict[str, Any]) -> dict[str, Any]:
"""Un-escape secret values that are URL-encoded."""
for k, v in secret.copy().items():
if k == "extra" and isinstance(v, dict):
# The old behavior was that extras were _not_ urlencoded inside the secret.
# So we should just allow the extra dict to remain as-is.
continue
elif v is not None:
secret[k] = unquote(v)
return secret
def get_conn_value(self, conn_id: str) -> str | None:
"""
Get serialized representation of Connection.
:param conn_id: connection id
"""
if self.connections_prefix is None:
return None
secret = self._get_secret(self.connections_prefix, conn_id, self.connections_lookup_pattern)
if secret is not None and secret.strip().startswith("{"):
# Before Airflow 2.3, the AWS SecretsManagerBackend added support for JSON secrets.
#
# The way this was implemented differs a little from how Airflow's core API handle JSON secrets.
#
# The most notable difference is that SecretsManagerBackend supports extra aliases for the
# Connection parts, e.g. "users" is allowed instead of "login".
#
# This means we need to deserialize then re-serialize the secret if it's a JSON, potentially
# renaming some keys in the process.
secret_dict = json.loads(secret)
standardized_secret_dict = self._standardize_secret_keys(secret_dict)
if self.are_secret_values_urlencoded:
standardized_secret_dict = self._remove_escaping_in_secret_dict(standardized_secret_dict)
standardized_secret = json.dumps(standardized_secret_dict)
return standardized_secret
else:
return secret
def get_variable(self, key: str) -> str | None:
"""
Get Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key, self.variables_lookup_pattern)
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key, self.config_lookup_pattern)
def _get_secret(self, path_prefix, secret_id: str, lookup_pattern: str | None) -> str | None:
"""
Get secret value from Secrets Manager.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Secrets Manager
"""
if lookup_pattern and not re.match(lookup_pattern, secret_id, re.IGNORECASE):
return None
error_msg = "An error occurred when calling the get_secret_value operation"
if path_prefix:
secrets_path = self.build_path(path_prefix, secret_id, self.sep)
else:
secrets_path = secret_id
try:
response = self.client.get_secret_value(
SecretId=secrets_path,
)
return response.get("SecretString")
except self.client.exceptions.ResourceNotFoundException:
self.log.debug(
"ResourceNotFoundException: %s. Secret %s not found.",
error_msg,
secret_id,
)
return None
except self.client.exceptions.InvalidParameterException:
self.log.debug(
"InvalidParameterException: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.InvalidRequestException:
self.log.debug(
"InvalidRequestException: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.DecryptionFailure:
self.log.debug(
"DecryptionFailure: %s",
error_msg,
exc_info=True,
)
return None
except self.client.exceptions.InternalServiceError:
self.log.debug(
"InternalServiceError: %s",
error_msg,
exc_info=True,
)
return None
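# --- Hedged usage sketch (not part of the upstream module) ---
# Illustrates how a JSON secret stored with "native" field names is normalized to
# Airflow Connection fields by _standardize_secret_keys. The backend instance,
# the extra_conn_words mapping and the secret values are all hypothetical;
# no call to AWS is made here.
if __name__ == "__main__":
    backend = SecretsManagerBackend(extra_conn_words={"login": ["db_user"]})
    raw_secret = {
        "db_user": "admin",
        "pass": "s3cr3t",
        "server": "db.example.com",
        "port": 5432,
        "engine": "postgres",
    }
    # Yields a dict keyed by login, password, host, port, schema, conn_type and extra.
    print(backend._standardize_secret_keys(raw_secret))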
| 15,625 | 43.266289 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/secrets/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/emr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
import boto3
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.links.base_aws import BASE_AWS_CONSOLE_LINK, BaseAwsLink
from airflow.utils.helpers import exactly_one
class EmrClusterLink(BaseAwsLink):
"""Helper class for constructing AWS EMR Cluster Link."""
name = "EMR Cluster"
key = "emr_cluster"
format_str = BASE_AWS_CONSOLE_LINK + "/emr/home?region={region_name}#/clusterDetails/{job_flow_id}"
class EmrLogsLink(BaseAwsLink):
"""Helper class for constructing AWS EMR Logs Link."""
name = "EMR Cluster Logs"
key = "emr_logs"
format_str = BASE_AWS_CONSOLE_LINK + "/s3/buckets/{log_uri}?region={region_name}&prefix={job_flow_id}/"
def format_link(self, **kwargs) -> str:
if not kwargs["log_uri"]:
return ""
return super().format_link(**kwargs)
def get_log_uri(
*, cluster: dict[str, Any] | None = None, emr_client: boto3.client = None, job_flow_id: str | None = None
) -> str | None:
"""
Retrieves the S3 URI to the EMR Job logs.
    Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id.
"""
if not exactly_one(bool(cluster), emr_client and job_flow_id):
raise AirflowException(
"Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id."
)
cluster_info = (cluster or emr_client.describe_cluster(ClusterId=job_flow_id))["Cluster"]
if "LogUri" not in cluster_info:
return None
log_uri = S3Hook.parse_s3_url(cluster_info["LogUri"])
return "/".join(log_uri)
| 2,519 | 36.058824 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/glue.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.providers.amazon.aws.links.base_aws import BASE_AWS_CONSOLE_LINK, BaseAwsLink
class GlueJobRunDetailsLink(BaseAwsLink):
"""Helper class for constructing AWS Glue Job Run Details Link."""
name = "AWS Glue Job Run Details"
key = "glue_job_run_details"
format_str = (
BASE_AWS_CONSOLE_LINK + "/gluestudio/home?region={region_name}#/job/{job_name}/run/{job_run_id}"
)
| 1,229 | 40 | 104 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/base_aws.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, ClassVar
from airflow.models import BaseOperatorLink, XCom
if TYPE_CHECKING:
from airflow.models import BaseOperator
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
BASE_AWS_CONSOLE_LINK = "https://console.{aws_domain}"
class BaseAwsLink(BaseOperatorLink):
"""Base Helper class for constructing AWS Console Link."""
name: ClassVar[str]
key: ClassVar[str]
format_str: ClassVar[str]
@staticmethod
def get_aws_domain(aws_partition) -> str | None:
if aws_partition == "aws":
return "aws.amazon.com"
elif aws_partition == "aws-cn":
return "amazonaws.cn"
elif aws_partition == "aws-us-gov":
return "amazonaws-us-gov.com"
return None
def format_link(self, **kwargs) -> str:
"""
Format AWS Service Link.
        Some AWS service links require additional escaping;
        in that case, this method should be overridden.
"""
try:
return self.format_str.format(**kwargs)
except KeyError:
return ""
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
"""
Link to Amazon Web Services Console.
:param operator: airflow operator
:param ti_key: TaskInstance ID to return link for
:return: link to external system
"""
conf = XCom.get_value(key=self.key, ti_key=ti_key)
return self.format_link(**conf) if conf else ""
@classmethod
def persist(
cls, context: Context, operator: BaseOperator, region_name: str, aws_partition: str, **kwargs
) -> None:
"""Store link information into XCom."""
if not operator.do_xcom_push:
return
operator.xcom_push(
context,
key=cls.key,
value={
"region_name": region_name,
"aws_domain": cls.get_aws_domain(aws_partition),
**kwargs,
},
)
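# --- Hedged usage sketch (not part of the upstream module) ---
# A hypothetical subclass showing how the name/key/format_str class attributes
# combine with format_link() to build a console URL. This link class is not
# shipped by the provider; it exists purely for illustration.
class _ExampleS3BucketLink(BaseAwsLink):
    """Illustrative link to an S3 bucket console page."""
    name = "Example S3 Bucket"
    key = "example_s3_bucket"
    format_str = BASE_AWS_CONSOLE_LINK + "/s3/buckets/{bucket_name}?region={region_name}"
if __name__ == "__main__":
    link = _ExampleS3BucketLink()
    # -> https://console.aws.amazon.com/s3/buckets/my-bucket?region=us-east-1
    print(
        link.format_link(
            aws_domain=BaseAwsLink.get_aws_domain("aws"),
            region_name="us-east-1",
            bucket_name="my-bucket",
        )
    )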
| 2,946 | 29.697917 | 101 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.providers.amazon.aws.links.base_aws import BASE_AWS_CONSOLE_LINK, BaseAwsLink
class BatchJobDefinitionLink(BaseAwsLink):
"""Helper class for constructing AWS Batch Job Definition Link."""
name = "Batch Job Definition"
key = "batch_job_definition"
format_str = (
BASE_AWS_CONSOLE_LINK + "/batch/home?region={region_name}#job-definition/detail/{job_definition_arn}"
)
class BatchJobDetailsLink(BaseAwsLink):
"""Helper class for constructing AWS Batch Job Details Link."""
name = "Batch Job Details"
key = "batch_job_details"
format_str = BASE_AWS_CONSOLE_LINK + "/batch/home?region={region_name}#jobs/detail/{job_id}"
class BatchJobQueueLink(BaseAwsLink):
"""Helper class for constructing AWS Batch Job Queue Link."""
name = "Batch Job Queue"
key = "batch_job_queue"
format_str = BASE_AWS_CONSOLE_LINK + "/batch/home?region={region_name}#queues/detail/{job_queue_arn}"
| 1,770 | 37.5 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/links/logs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from urllib.parse import quote_plus
from airflow.providers.amazon.aws.links.base_aws import BASE_AWS_CONSOLE_LINK, BaseAwsLink
class CloudWatchEventsLink(BaseAwsLink):
"""Helper class for constructing AWS CloudWatch Events Link."""
name = "CloudWatch Events"
key = "cloudwatch_events"
format_str = (
BASE_AWS_CONSOLE_LINK
+ "/cloudwatch/home?region={awslogs_region}#logsV2:log-groups/log-group/{awslogs_group}"
+ "/log-events/{awslogs_stream_name}"
)
def format_link(self, **kwargs) -> str:
for field in ("awslogs_stream_name", "awslogs_group"):
if field in kwargs:
kwargs[field] = quote_plus(kwargs[field])
else:
return ""
return super().format_link(**kwargs)
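# --- Hedged usage sketch (not part of the upstream module) ---
# Shows how the log group and stream names are percent-encoded before being
# interpolated into the console URL. All values below are placeholders.
if __name__ == "__main__":
    link = CloudWatchEventsLink()
    print(
        link.format_link(
            aws_domain=CloudWatchEventsLink.get_aws_domain("aws"),
            awslogs_region="us-east-1",
            awslogs_group="/aws/ecs/my-task",
            awslogs_stream_name="ecs/my-container/1234",
        )
    )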
| 1,608 | 36.418605 | 96 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/local_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LocalFilesystemToS3Operator(BaseOperator):
"""
Uploads a file from a local filesystem to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LocalFilesystemToS3Operator`
:param filename: Path to the local file. Path can be either absolute
(e.g. /path/to/file.ext) or relative (e.g. ../../foo/*/*.csv). (templated)
:param dest_key: The key of the object to copy to. (templated)
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit `dest_bucket`.
:param dest_bucket: Name of the S3 bucket to where the object is copied. (templated)
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
        - path/to/cert/bundle.pem: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
"""
template_fields: Sequence[str] = ("filename", "dest_key", "dest_bucket")
def __init__(
self,
*,
filename: str,
dest_key: str,
dest_bucket: str | None = None,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.filename = filename
self.dest_key = dest_key
self.dest_bucket = dest_bucket
self.aws_conn_id = aws_conn_id
self.verify = verify
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
s3_bucket, s3_key = s3_hook.get_s3_bucket_key(
self.dest_bucket, self.dest_key, "dest_bucket", "dest_key"
)
s3_hook.load_file(
self.filename,
s3_key,
s3_bucket,
self.replace,
self.encrypt,
self.gzip,
self.acl_policy,
)
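# --- Hedged usage sketch (not part of the upstream module) ---
# Minimal DAG wiring for the operator; the local path, bucket and key below are
# placeholders, not values used by the provider itself.
if __name__ == "__main__":
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_local_to_s3", start_date=datetime(2023, 1, 1), schedule=None):
        upload_report = LocalFilesystemToS3Operator(
            task_id="upload_report",
            filename="/tmp/report.csv",
            dest_key="s3://my-bucket/reports/report.csv",  # full s3:// form, so dest_bucket is omitted
            replace=True,
        )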
| 4,158 | 37.155963 | 88 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/dynamodb_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from copy import copy
from datetime import datetime
from decimal import Decimal
from functools import cached_property
from os.path import getsize
from tempfile import NamedTemporaryFile
from typing import IO, TYPE_CHECKING, Any, Callable, Sequence
from uuid import uuid4
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.dynamodb import DynamoDBHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.transfers.base import AwsToAwsBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class JSONEncoder(json.JSONEncoder):
"""Custom json encoder implementation."""
def default(self, obj):
"""Convert decimal objects in a json serializable format."""
if isinstance(obj, Decimal):
return float(obj)
return super().default(obj)
def _convert_item_to_json_bytes(item: dict[str, Any]) -> bytes:
return (json.dumps(item, cls=JSONEncoder) + "\n").encode("utf-8")
def _upload_file_to_s3(
file_obj: IO,
bucket_name: str,
s3_key_prefix: str,
aws_conn_id: str | None = AwsBaseHook.default_conn_name,
) -> None:
s3_client = S3Hook(aws_conn_id=aws_conn_id).get_conn()
file_obj.seek(0)
s3_client.upload_file(
Filename=file_obj.name,
Bucket=bucket_name,
Key=s3_key_prefix + str(uuid4()),
)
class DynamoDBToS3Operator(AwsToAwsBaseOperator):
"""
Replicates records from a DynamoDB table to S3.
It scans a DynamoDB table and writes the received records to a file
on the local filesystem. It flushes the file to S3 once the file size
exceeds the file size limit specified by the user.
    Users can also specify filtering criteria using dynamodb_scan_kwargs
to only replicate records that satisfy the criteria.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/transfer:DynamoDBToS3Operator`
:param dynamodb_table_name: Dynamodb table to replicate data from
:param s3_bucket_name: S3 bucket to replicate data to
:param file_size: Flush file to s3 if file size >= file_size
:param dynamodb_scan_kwargs: kwargs pass to
<https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.scan>
:param s3_key_prefix: Prefix of s3 object key
    :param process_func: How we transform a DynamoDB item to bytes. By default, the item is dumped as JSON.
:param export_time: Time in the past from which to export table data, counted in seconds from the start of
the Unix epoch. The table export will be a snapshot of the table's state at this point in time.
:param export_format: The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON
or ION.
"""
template_fields: Sequence[str] = (
*AwsToAwsBaseOperator.template_fields,
"dynamodb_table_name",
"s3_bucket_name",
"file_size",
"dynamodb_scan_kwargs",
"s3_key_prefix",
"process_func",
"export_time",
"export_format",
)
template_fields_renderers = {
"dynamodb_scan_kwargs": "json",
}
def __init__(
self,
*,
dynamodb_table_name: str,
s3_bucket_name: str,
file_size: int,
dynamodb_scan_kwargs: dict[str, Any] | None = None,
s3_key_prefix: str = "",
process_func: Callable[[dict[str, Any]], bytes] = _convert_item_to_json_bytes,
export_time: datetime | None = None,
export_format: str = "DYNAMODB_JSON",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.file_size = file_size
self.process_func = process_func
self.dynamodb_table_name = dynamodb_table_name
self.dynamodb_scan_kwargs = dynamodb_scan_kwargs
self.s3_bucket_name = s3_bucket_name
self.s3_key_prefix = s3_key_prefix
self.export_time = export_time
self.export_format = export_format
@cached_property
def hook(self):
"""Create DynamoDBHook."""
return DynamoDBHook(aws_conn_id=self.source_aws_conn_id)
def execute(self, context: Context) -> None:
if self.export_time:
self._export_table_to_point_in_time()
else:
self._export_entire_data()
def _export_table_to_point_in_time(self):
"""
        Export data from the start of the Unix epoch until `export_time`.
Table export will be a snapshot of the table's state at this point in time.
"""
if self.export_time and self.export_time > datetime.now(self.export_time.tzinfo):
raise ValueError("The export_time parameter cannot be a future time.")
client = self.hook.conn.meta.client
table_description = client.describe_table(TableName=self.dynamodb_table_name)
response = client.export_table_to_point_in_time(
TableArn=table_description.get("Table", {}).get("TableArn"),
ExportTime=self.export_time,
S3Bucket=self.s3_bucket_name,
S3Prefix=self.s3_key_prefix,
ExportFormat=self.export_format,
)
waiter = self.hook.get_waiter("export_table")
export_arn = response.get("ExportDescription", {}).get("ExportArn")
waiter.wait(ExportArn=export_arn)
def _export_entire_data(self):
"""Export all data from the table."""
table = self.hook.conn.Table(self.dynamodb_table_name)
scan_kwargs = copy(self.dynamodb_scan_kwargs) if self.dynamodb_scan_kwargs else {}
err = None
f: IO[Any]
with NamedTemporaryFile() as f:
try:
f = self._scan_dynamodb_and_upload_to_s3(f, scan_kwargs, table)
except Exception as e:
err = e
raise e
finally:
if err is None:
_upload_file_to_s3(f, self.s3_bucket_name, self.s3_key_prefix, self.dest_aws_conn_id)
def _scan_dynamodb_and_upload_to_s3(self, temp_file: IO, scan_kwargs: dict, table: Any) -> IO:
while True:
response = table.scan(**scan_kwargs)
items = response["Items"]
for item in items:
temp_file.write(self.process_func(item))
if "LastEvaluatedKey" not in response:
# no more items to scan
break
last_evaluated_key = response["LastEvaluatedKey"]
scan_kwargs["ExclusiveStartKey"] = last_evaluated_key
# Upload the file to S3 if reach file size limit
if getsize(temp_file.name) >= self.file_size:
_upload_file_to_s3(temp_file, self.s3_bucket_name, self.s3_key_prefix, self.dest_aws_conn_id)
temp_file.close()
temp_file = NamedTemporaryFile()
return temp_file
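# --- Hedged usage sketch (not part of the upstream module) ---
# Minimal DAG wiring for a full-table backup; the table name, bucket and prefix
# are placeholders. file_size controls how often the local buffer is flushed to S3.
if __name__ == "__main__":
    from airflow import DAG
    with DAG(dag_id="example_dynamodb_to_s3", start_date=datetime(2023, 1, 1), schedule=None):
        backup_users = DynamoDBToS3Operator(
            task_id="backup_users_table",
            dynamodb_table_name="users",
            s3_bucket_name="my-backup-bucket",
            s3_key_prefix="dynamodb/users/",
            file_size=20 * 1024 * 1024,  # flush to S3 roughly every 20 MB
        )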
| 7,755 | 36.650485 | 118 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/base.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains base AWS to AWS transfer operator."""
from __future__ import annotations
import warnings
from typing import Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.types import NOTSET, ArgNotSet
_DEPRECATION_MSG = (
"The aws_conn_id parameter has been deprecated. Use the source_aws_conn_id parameter instead."
)
class AwsToAwsBaseOperator(BaseOperator):
"""
Base class for AWS to AWS transfer operators.
:param source_aws_conn_id: The Airflow connection used for AWS credentials
to access DynamoDB. If this is None or empty then the default boto3
behaviour is used. If running Airflow in a distributed manner and
source_aws_conn_id is None or empty, then default boto3 configuration
would be used (and must be maintained on each worker node).
:param dest_aws_conn_id: The Airflow connection used for AWS credentials
to access S3. If this is not set then the source_aws_conn_id connection is used.
:param aws_conn_id: The Airflow connection used for AWS credentials (deprecated; use source_aws_conn_id).
"""
template_fields: Sequence[str] = (
"source_aws_conn_id",
"dest_aws_conn_id",
)
def __init__(
self,
*,
source_aws_conn_id: str | None = AwsBaseHook.default_conn_name,
dest_aws_conn_id: str | None | ArgNotSet = NOTSET,
aws_conn_id: str | None | ArgNotSet = NOTSET,
**kwargs,
) -> None:
super().__init__(**kwargs)
if not isinstance(aws_conn_id, ArgNotSet):
warnings.warn(_DEPRECATION_MSG, AirflowProviderDeprecationWarning, stacklevel=3)
self.source_aws_conn_id = aws_conn_id
else:
self.source_aws_conn_id = source_aws_conn_id
self.dest_aws_conn_id = (
self.source_aws_conn_id if isinstance(dest_aws_conn_id, ArgNotSet) else dest_aws_conn_id
)
| 2,850 | 39.15493 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/salesforce_to_s3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import tempfile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SalesforceToS3Operator(BaseOperator):
"""
Submits a Salesforce query and uploads the results to AWS S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SalesforceToS3Operator`
:param salesforce_query: The query to send to Salesforce.
:param s3_bucket_name: The bucket name to upload to.
:param s3_key: The object name to set when uploading the file.
:param salesforce_conn_id: The name of the connection that has the parameters needed
to connect to Salesforce.
:param export_format: Desired format of files to be exported.
:param query_params: Additional optional arguments to be passed to the HTTP request querying Salesforce.
:param include_deleted: True if the query should include deleted records.
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:param aws_conn_id: The name of the connection that has the parameters we need to connect to S3.
:param replace: A flag to decide whether or not to overwrite the S3 key if it already exists. If set to
        False and the key exists, an error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side by S3 and will
be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally.
:param acl_policy: String specifying the canned ACL policy for the file being uploaded
to the S3 bucket.
"""
template_fields: Sequence[str] = ("salesforce_query", "s3_bucket_name", "s3_key")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"salesforce_query": "sql"}
def __init__(
self,
*,
salesforce_query: str,
s3_bucket_name: str,
s3_key: str,
salesforce_conn_id: str,
export_format: str = "csv",
query_params: dict | None = None,
include_deleted: bool = False,
coerce_to_timestamp: bool = False,
record_time_added: bool = False,
aws_conn_id: str = "aws_default",
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.salesforce_query = salesforce_query
self.s3_bucket_name = s3_bucket_name
self.s3_key = s3_key
self.salesforce_conn_id = salesforce_conn_id
self.export_format = export_format
self.query_params = query_params
self.include_deleted = include_deleted
self.coerce_to_timestamp = coerce_to_timestamp
self.record_time_added = record_time_added
self.aws_conn_id = aws_conn_id
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
def execute(self, context: Context) -> str:
salesforce_hook = SalesforceHook(salesforce_conn_id=self.salesforce_conn_id)
response = salesforce_hook.make_query(
query=self.salesforce_query,
include_deleted=self.include_deleted,
query_params=self.query_params,
)
with tempfile.TemporaryDirectory() as tmp:
path = os.path.join(tmp, "salesforce_temp_file")
salesforce_hook.write_object_to_file(
query_results=response["records"],
filename=path,
fmt=self.export_format,
coerce_to_timestamp=self.coerce_to_timestamp,
record_time_added=self.record_time_added,
)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
s3_hook.load_file(
filename=path,
key=self.s3_key,
bucket_name=self.s3_bucket_name,
replace=self.replace,
encrypt=self.encrypt,
gzip=self.gzip,
acl_policy=self.acl_policy,
)
s3_uri = f"s3://{self.s3_bucket_name}/{self.s3_key}"
self.log.info("Salesforce data uploaded to S3 at %s.", s3_uri)
return s3_uri
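# --- Hedged usage sketch (not part of the upstream module) ---
# Minimal DAG wiring; the SOQL query, connection ids, bucket and key are placeholders.
if __name__ == "__main__":
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_salesforce_to_s3", start_date=datetime(2023, 1, 1), schedule=None):
        export_leads = SalesforceToS3Operator(
            task_id="export_leads",
            salesforce_query="SELECT Id, Email, Company FROM Lead",
            s3_bucket_name="my-bucket",
            s3_key="salesforce/leads.csv",
            salesforce_conn_id="salesforce_default",
            replace=True,
        )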
| 5,679 | 41.38806 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/redshift_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transfers data from AWS Redshift into a S3 Bucket."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Mapping, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_data import RedshiftDataHook
from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.utils.redshift import build_credentials_block
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedshiftToS3Operator(BaseOperator):
"""
Execute an UNLOAD command to s3 as a CSV with headers.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftToS3Operator`
:param s3_bucket: reference to a specific S3 bucket
:param s3_key: reference to a specific S3 key. If ``table_as_file_name`` is set
to False, this param must include the desired file name
:param schema: reference to a specific schema in redshift database
Applicable when ``table`` param provided.
:param table: reference to a specific table in redshift database
Used when ``select_query`` param not provided.
:param select_query: custom select query to fetch data from redshift database
:param redshift_conn_id: reference to a specific redshift database
:param aws_conn_id: reference to a specific S3 connection
If the AWS connection contains 'aws_iam_role' in ``extras``
the operator will use AWS STS credentials with a token
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param unload_options: reference to a list of UNLOAD options
:param autocommit: If set to True it will automatically commit the UNLOAD statement.
Otherwise it will be committed right before the redshift connection gets closed.
:param include_header: If set to True the s3 file contains the header columns.
:param parameters: (optional) the parameters to render the SQL query with.
:param table_as_file_name: If set to True, the s3 file will be named as the table.
Applicable when ``table`` param provided.
:param redshift_data_api_kwargs: If using the Redshift Data API instead of the SQL-based connection,
dict of arguments for the hook's ``execute_query`` method.
Cannot include any of these kwargs: ``{'sql', 'parameters'}``
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"schema",
"table",
"unload_options",
"select_query",
"redshift_conn_id",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"select_query": "sql"}
ui_color = "#ededed"
def __init__(
self,
*,
s3_bucket: str,
s3_key: str,
schema: str | None = None,
table: str | None = None,
select_query: str | None = None,
redshift_conn_id: str = "redshift_default",
aws_conn_id: str = "aws_default",
verify: bool | str | None = None,
unload_options: list | None = None,
autocommit: bool = False,
include_header: bool = False,
parameters: Iterable | Mapping | None = None,
        table_as_file_name: bool = True,  # Set to True by default to avoid breaking current workflows
redshift_data_api_kwargs: dict = {},
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = f"{s3_key}/{table}_" if (table and table_as_file_name) else s3_key
self.schema = schema
self.table = table
self.redshift_conn_id = redshift_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.unload_options: list = unload_options or []
self.autocommit = autocommit
self.include_header = include_header
self.parameters = parameters
self.table_as_file_name = table_as_file_name
self.redshift_data_api_kwargs = redshift_data_api_kwargs
if select_query:
self.select_query = select_query
elif self.schema and self.table:
self.select_query = f"SELECT * FROM {self.schema}.{self.table}"
else:
raise ValueError(
"Please provide both `schema` and `table` params or `select_query` to fetch the data."
)
if self.include_header and "HEADER" not in [uo.upper().strip() for uo in self.unload_options]:
self.unload_options = list(self.unload_options) + [
"HEADER",
]
if self.redshift_data_api_kwargs:
for arg in ["sql", "parameters"]:
if arg in self.redshift_data_api_kwargs.keys():
raise AirflowException(f"Cannot include param '{arg}' in Redshift Data API kwargs")
def _build_unload_query(
self, credentials_block: str, select_query: str, s3_key: str, unload_options: str
) -> str:
return f"""
UNLOAD ('{select_query}')
TO 's3://{self.s3_bucket}/{s3_key}'
credentials
'{credentials_block}'
{unload_options};
"""
def execute(self, context: Context) -> None:
redshift_hook: RedshiftDataHook | RedshiftSQLHook
if self.redshift_data_api_kwargs:
redshift_hook = RedshiftDataHook(aws_conn_id=self.redshift_conn_id)
else:
redshift_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
conn = S3Hook.get_connection(conn_id=self.aws_conn_id)
if conn.extra_dejson.get("role_arn", False):
credentials_block = f"aws_iam_role={conn.extra_dejson['role_arn']}"
else:
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
credentials = s3_hook.get_credentials()
credentials_block = build_credentials_block(credentials)
unload_options = "\n\t\t\t".join(self.unload_options)
unload_query = self._build_unload_query(
credentials_block, self.select_query, self.s3_key, unload_options
)
self.log.info("Executing UNLOAD command...")
if isinstance(redshift_hook, RedshiftDataHook):
redshift_hook.execute_query(
sql=unload_query, parameters=self.parameters, **self.redshift_data_api_kwargs
)
else:
redshift_hook.run(unload_query, self.autocommit, parameters=self.parameters)
self.log.info("UNLOAD command complete...")
| 8,108 | 43.554945 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/gcs_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to S3 operator."""
from __future__ import annotations
import os
import warnings
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToS3Operator(BaseOperator):
"""
Synchronizes a Google Cloud Storage bucket with an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToS3Operator`
:param bucket: The Google Cloud Storage bucket to find the objects. (templated)
:param prefix: Prefix string which filters objects whose name begin with
this prefix. (templated)
:param delimiter: (Deprecated) The delimiter by which you want to filter the objects. (templated)
For e.g to lists the CSV files from in a directory in GCS you would use
delimiter='.csv'.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param dest_aws_conn_id: The destination S3 connection
:param dest_s3_key: The base S3 key to be used to store the files. (templated)
:param dest_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param replace: Whether or not to verify the existence of the files in the
destination bucket.
        By default it is set to False.
If set to True, will upload all the files replacing the existing ones in
the destination bucket.
If set to False, will upload only the files that are in the origin but not
in the destination bucket.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param s3_acl_policy: Optional The string to specify the canned ACL policy for the
object to be uploaded in S3
:param keep_directory_structure: (Optional) When set to False the path of the file
on the bucket is recreated within path passed in dest_s3_key.
:param match_glob: (Optional) filters objects based on the glob pattern given by the string
(e.g, ``'**/*/.json'``)
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"delimiter",
"dest_s3_key",
"google_impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
bucket: str,
prefix: str | None = None,
delimiter: str | None = None,
gcp_conn_id: str = "google_cloud_default",
dest_aws_conn_id: str = "aws_default",
dest_s3_key: str,
dest_verify: str | bool | None = None,
replace: bool = False,
google_impersonation_chain: str | Sequence[str] | None = None,
dest_s3_extra_args: dict | None = None,
s3_acl_policy: str | None = None,
keep_directory_structure: bool = True,
match_glob: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
if delimiter:
warnings.warn(
"Usage of 'delimiter' is deprecated, please use 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.delimiter = delimiter
self.gcp_conn_id = gcp_conn_id
self.dest_aws_conn_id = dest_aws_conn_id
self.dest_s3_key = dest_s3_key
self.dest_verify = dest_verify
self.replace = replace
self.google_impersonation_chain = google_impersonation_chain
self.dest_s3_extra_args = dest_s3_extra_args or {}
self.s3_acl_policy = s3_acl_policy
self.keep_directory_structure = keep_directory_structure
self.match_glob = match_glob
def execute(self, context: Context) -> list[str]:
        # list all files in a Google Cloud Storage bucket
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
self.log.info(
"Getting list of the files. Bucket: %s; Delimiter: %s; Prefix: %s",
self.bucket,
self.delimiter,
self.prefix,
)
files = hook.list(
bucket_name=self.bucket, prefix=self.prefix, delimiter=self.delimiter, match_glob=self.match_glob
)
s3_hook = S3Hook(
aws_conn_id=self.dest_aws_conn_id, verify=self.dest_verify, extra_args=self.dest_s3_extra_args
)
if not self.keep_directory_structure and self.prefix:
self.dest_s3_key = os.path.join(self.dest_s3_key, self.prefix)
if not self.replace:
# if we are not replacing -> list all files in the S3 bucket
# and only keep those files which are present in
# Google Cloud Storage and not in S3
bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
# if prefix is empty, do not add "/" at end since it would
# filter all the objects (return empty list) instead of empty
# prefix returning all the objects
if prefix:
prefix = prefix if prefix.endswith("/") else f"{prefix}/"
# look for the bucket and the prefix to avoid look into
# parent directories/keys
existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
            # in case no files exist, use an empty list to avoid errors
existing_files = existing_files if existing_files is not None else []
# remove the prefix for the existing files to allow the match
existing_files = [file.replace(prefix, "", 1) for file in existing_files]
files = list(set(files) - set(existing_files))
if files:
for file in files:
with hook.provide_file(object_name=file, bucket_name=self.bucket) as local_tmp_file:
dest_key = os.path.join(self.dest_s3_key, file)
self.log.info("Saving file to %s", dest_key)
s3_hook.load_file(
filename=local_tmp_file.name,
key=dest_key,
replace=self.replace,
acl_policy=self.s3_acl_policy,
)
self.log.info("All done, uploaded %d files to S3", len(files))
else:
self.log.info("In sync, no files needed to be uploaded to S3")
return files
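# Illustrative sketch (not part of the original module): a minimal DAG wiring for
# GCSToS3Operator. The bucket names, prefix, glob and DAG settings are hypothetical
# placeholders.
#
#   import pendulum
#   from airflow import DAG
#
#   with DAG(
#       dag_id="example_gcs_to_s3",
#       start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
#       schedule=None,
#       catchup=False,
#   ):
#       sync_gcs_to_s3 = GCSToS3Operator(
#           task_id="sync_gcs_to_s3",
#           bucket="my-gcs-bucket",
#           prefix="exports/",
#           match_glob="**/*.csv",
#           dest_s3_key="s3://my-s3-bucket/exports/",
#           replace=False,
#       )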
| 8,544 | 42.375635 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/hive_to_dynamodb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from Hive to DynamoDB."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Callable, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.dynamodb import DynamoDBHook
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveToDynamoDBOperator(BaseOperator):
"""
Moves data from Hive to DynamoDB.
Note that for now the data is loaded into memory before being pushed
    to DynamoDB, so this operator should be used for smallish amounts of data.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/transfer:HiveToDynamoDBOperator`
:param sql: SQL query to execute against the hive database. (templated)
:param table_name: target DynamoDB table
:param table_keys: partition key and sort key
:param pre_process: implement pre-processing of source data
:param pre_process_args: list of pre_process function arguments
:param pre_process_kwargs: dict of pre_process function arguments
:param region_name: aws region name (example: us-east-1)
:param schema: hive database schema
:param hiveserver2_conn_id: Reference to the
        :ref:`Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
:param aws_conn_id: aws connection
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "hql"}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
table_name: str,
table_keys: list,
pre_process: Callable | None = None,
pre_process_args: list | None = None,
pre_process_kwargs: list | None = None,
region_name: str | None = None,
schema: str = "default",
hiveserver2_conn_id: str = "hiveserver2_default",
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info("Extracting data from Hive")
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = DynamoDBHook(
aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name,
)
self.log.info("Inserting rows into dynamodb")
if self.pre_process is None:
dynamodb.write_batch_data(json.loads(data.to_json(orient="records")))
else:
dynamodb.write_batch_data(
self.pre_process(data=data, args=self.pre_process_args, kwargs=self.pre_process_kwargs)
)
self.log.info("Done.")
| 4,173 | 36.267857 | 103 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/mongo_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Iterable, Sequence, cast
from bson import json_util
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import Cursor
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.mongo.hooks.mongo import MongoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class MongoToS3Operator(BaseOperator):
"""Move data from MongoDB to S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MongoToS3Operator`
:param mongo_conn_id: reference to a specific mongo connection
:param aws_conn_id: reference to a specific S3 connection
:param mongo_collection: reference to a specific collection in your mongo db
    :param mongo_query: query to execute. A dict for a standard query, or a list of dicts for an aggregate pipeline
:param mongo_projection: optional parameter to filter the returned fields by
the query. It can be a list of fields names to include or a dictionary
for excluding fields (e.g ``projection={"_id": 0}`` )
:param s3_bucket: reference to a specific S3 bucket to store the data
:param s3_key: in which S3 key the file will be stored
:param mongo_db: reference to a specific mongo database
:param replace: whether or not to replace the file in S3 if it previously existed
    :param allow_disk_use: enables writing to temporary files in case you are handling a large dataset.
        This only takes effect when `mongo_query` is a list, i.e. when running an aggregate pipeline.
:param compression: type of compression to use for output file in S3. Currently only gzip is supported.
"""
template_fields: Sequence[str] = ("s3_bucket", "s3_key", "mongo_query", "mongo_collection")
ui_color = "#589636"
template_fields_renderers = {"mongo_query": "json"}
def __init__(
self,
*,
mongo_conn_id: str = "mongo_default",
aws_conn_id: str = "aws_default",
mongo_collection: str,
mongo_query: list | dict,
s3_bucket: str,
s3_key: str,
mongo_db: str | None = None,
mongo_projection: list | dict | None = None,
replace: bool = False,
allow_disk_use: bool = False,
compression: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.mongo_conn_id = mongo_conn_id
self.aws_conn_id = aws_conn_id
self.mongo_db = mongo_db
self.mongo_collection = mongo_collection
# Grab query and determine if we need to run an aggregate pipeline
self.mongo_query = mongo_query
self.is_pipeline = isinstance(self.mongo_query, list)
self.mongo_projection = mongo_projection
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.replace = replace
self.allow_disk_use = allow_disk_use
self.compression = compression
def execute(self, context: Context):
"""Is written to depend on transform method."""
s3_conn = S3Hook(self.aws_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results: CommandCursor[Any] | Cursor = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=cast(list, self.mongo_query),
mongo_db=self.mongo_db,
allowDiskUse=self.allow_disk_use,
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=cast(dict, self.mongo_query),
projection=self.mongo_projection,
mongo_db=self.mongo_db,
find_one=False,
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
s3_conn.load_string(
string_data=docs_str,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace,
compression=self.compression,
)
@staticmethod
def _stringify(iterable: Iterable, joinable: str = "\n") -> str:
"""Stringify an iterable of dicts.
This dumps each dict with JSON, and joins them with ``joinable``.
"""
return joinable.join(json.dumps(doc, default=json_util.default) for doc in iterable)
@staticmethod
def transform(docs: Any) -> Any:
"""Transform the data for transfer.
        This method is meant to be extended by child classes to perform
        transformations unique to those operators' needs. It processes the pyMongo
        cursor and returns an iterable with each element being a JSON-serializable
        dictionary.
The default implementation assumes no processing is needed, i.e. input
is a pyMongo cursor of documents and just needs to be passed through.
Override this method for custom transformations.
"""
return docs
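# Illustrative sketch (not part of the original module): overriding ``transform`` in a
# subclass to reshape documents before upload. The collection, query, bucket and key
# are hypothetical.
#
#   class TrimmedMongoToS3Operator(MongoToS3Operator):
#       @staticmethod
#       def transform(docs):
#           # keep only the fields we care about
#           return ({"id": str(doc["_id"]), "name": doc.get("name")} for doc in docs)
#
#   export_users = TrimmedMongoToS3Operator(
#       task_id="export_users",
#       mongo_collection="users",
#       mongo_query={"active": True},
#       s3_bucket="my-bucket",
#       s3_key="mongo/users.json",
#       replace=True,
#   )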
| 5,981 | 38.615894 | 107 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/sftp_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from urllib.parse import urlsplit
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ssh.hooks.ssh import SSHHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SFTPToS3Operator(BaseOperator):
"""
Transfer files from an SFTP server to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToS3Operator`
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param sftp_path: The sftp remote path. This is the specified file path
for downloading the file from the SFTP server.
:param s3_conn_id: The s3 connection id. The name or identifier for
establishing a connection to S3
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket to where
the file is uploaded.
:param s3_key: The targeted s3 key. This is the specified path for
uploading the file to S3.
:param use_temp_file: If True, copies file first to local,
if False streams file from SFTP to S3.
"""
template_fields: Sequence[str] = ("s3_key", "sftp_path", "s3_bucket")
def __init__(
self,
*,
s3_bucket: str,
s3_key: str,
sftp_path: str,
sftp_conn_id: str = "ssh_default",
s3_conn_id: str = "aws_default",
use_temp_file: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sftp_conn_id = sftp_conn_id
self.sftp_path = sftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.s3_conn_id = s3_conn_id
self.use_temp_file = use_temp_file
@staticmethod
def get_s3_key(s3_key: str) -> str:
"""This parses the correct format for S3 keys regardless of how the S3 url is passed."""
parsed_s3_key = urlsplit(s3_key)
return parsed_s3_key.path.lstrip("/")
def execute(self, context: Context) -> None:
self.s3_key = self.get_s3_key(self.s3_key)
ssh_hook = SSHHook(ssh_conn_id=self.sftp_conn_id)
s3_hook = S3Hook(self.s3_conn_id)
sftp_client = ssh_hook.get_conn().open_sftp()
if self.use_temp_file:
with NamedTemporaryFile("w") as f:
sftp_client.get(self.sftp_path, f.name)
s3_hook.load_file(filename=f.name, key=self.s3_key, bucket_name=self.s3_bucket, replace=True)
else:
with sftp_client.file(self.sftp_path, mode="rb") as data:
s3_hook.get_conn().upload_fileobj(data, self.s3_bucket, self.s3_key, Callback=self.log.info)
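# Illustrative sketch (not part of the original module): streaming a remote file to S3
# without a local temporary file. The paths, bucket and connection ids are hypothetical.
#
#   sftp_to_s3 = SFTPToS3Operator(
#       task_id="sftp_to_s3",
#       sftp_conn_id="ssh_default",
#       sftp_path="/data/reports/report.csv",
#       s3_conn_id="aws_default",
#       s3_bucket="my-bucket",
#       s3_key="reports/report.csv",
#       use_temp_file=False,
#   )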
| 3,643 | 36.958333 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/imap_attachment_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module allows you to transfer mail attachments from a mail server into s3 bucket."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.imap.hooks.imap import ImapHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class ImapAttachmentToS3Operator(BaseOperator):
"""
Transfers a mail attachment from a mail server into s3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ImapAttachmentToS3Operator`
:param imap_attachment_name: The file name of the mail attachment that you want to transfer.
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket where the file will be downloaded.
:param s3_key: The destination file name in the s3 bucket for the attachment.
:param imap_check_regex: If set checks the `imap_attachment_name` for a regular expression.
:param imap_mail_folder: The folder on the mail server to look for the attachment.
:param imap_mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param s3_overwrite: If set overwrites the s3 key if already exists.
:param imap_conn_id: The reference to the connection details of the mail server.
:param aws_conn_id: AWS connection to use.
"""
template_fields: Sequence[str] = ("imap_attachment_name", "s3_key", "imap_mail_filter")
def __init__(
self,
*,
imap_attachment_name: str,
s3_bucket: str,
s3_key: str,
imap_check_regex: bool = False,
imap_mail_folder: str = "INBOX",
imap_mail_filter: str = "All",
s3_overwrite: bool = False,
imap_conn_id: str = "imap_default",
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.imap_attachment_name = imap_attachment_name
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.imap_check_regex = imap_check_regex
self.imap_mail_folder = imap_mail_folder
self.imap_mail_filter = imap_mail_filter
self.s3_overwrite = s3_overwrite
self.imap_conn_id = imap_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context: Context) -> None:
"""
This function executes the transfer from the email server (via imap) into s3.
:param context: The context while executing.
"""
self.log.info(
"Transferring mail attachment %s from mail server via imap to s3 key %s...",
self.imap_attachment_name,
self.s3_key,
)
with ImapHook(imap_conn_id=self.imap_conn_id) as imap_hook:
imap_mail_attachments = imap_hook.retrieve_mail_attachments(
name=self.imap_attachment_name,
check_regex=self.imap_check_regex,
latest_only=True,
mail_folder=self.imap_mail_folder,
mail_filter=self.imap_mail_filter,
)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
s3_hook.load_bytes(
bytes_data=imap_mail_attachments[0][1],
bucket_name=self.s3_bucket,
key=self.s3_key,
replace=self.s3_overwrite,
)
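# Illustrative sketch (not part of the original module): uploading the latest attachment
# whose name matches a regex. The attachment pattern, bucket and key are hypothetical.
#
#   imap_attachment_to_s3 = ImapAttachmentToS3Operator(
#       task_id="imap_attachment_to_s3",
#       imap_attachment_name=r"invoice.*\.pdf",
#       imap_check_regex=True,
#       imap_mail_folder="INBOX",
#       s3_bucket="my-bucket",
#       s3_key="invoices/latest.pdf",
#       s3_overwrite=True,
#   )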
| 4,248 | 39.084906 | 102 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/glacier_to_gcs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlacierToGCSOperator(BaseOperator):
"""
Transfers data from Amazon Glacier to Google Cloud Storage.
.. note::
        Please be warned that GlacierToGCSOperator may depend on available memory.
        Transferring big files may not work well.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlacierToGCSOperator`
:param aws_conn_id: The reference to the AWS connection details
:param gcp_conn_id: The reference to the GCP connection details
:param vault_name: the Glacier vault on which job is executed
:param bucket_name: the Google Cloud Storage bucket where the data will be transferred
    :param object_name: the name of the object to be created in the Google Cloud
        Storage bucket.
:param gzip: option to compress local file or file data for upload
:param chunk_size: size of chunk in bytes the that will be downloaded from Glacier vault
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("vault_name", "bucket_name", "object_name")
def __init__(
self,
*,
aws_conn_id: str = "aws_default",
gcp_conn_id: str = "google_cloud_default",
vault_name: str,
bucket_name: str,
object_name: str,
gzip: bool,
chunk_size: int = 1024,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.gcp_conn_id = gcp_conn_id
self.vault_name = vault_name
self.bucket_name = bucket_name
self.object_name = object_name
self.gzip = gzip
self.chunk_size = chunk_size
self.impersonation_chain = google_impersonation_chain
def execute(self, context: Context) -> str:
glacier_hook = GlacierHook(aws_conn_id=self.aws_conn_id)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
job_id = glacier_hook.retrieve_inventory(vault_name=self.vault_name)
with tempfile.NamedTemporaryFile() as temp_file:
glacier_data = glacier_hook.retrieve_inventory_results(
vault_name=self.vault_name, job_id=job_id["jobId"]
)
# Read the file content in chunks using StreamingBody
# https://botocore.amazonaws.com/v1/documentation/api/latest/reference/response.html
stream = glacier_data["body"]
for chunk in stream.iter_chunk(chunk_size=self.chunk_size):
temp_file.write(chunk)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=temp_file.name,
gzip=self.gzip,
)
return f"gs://{self.bucket_name}/{self.object_name}"
| 4,702 | 41.369369 | 96 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/exasol_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transfers data from Exasol database into a S3 Bucket."""
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.exasol.hooks.exasol import ExasolHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class ExasolToS3Operator(BaseOperator):
"""
Export data from Exasol database to AWS S3 bucket.
:param query_or_table: the sql statement to be executed or table name to export
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which to store the file
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:param query_params: Query parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
:param export_params: Extra parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
"""
template_fields: Sequence[str] = ("query_or_table", "key", "bucket_name", "query_params", "export_params")
template_fields_renderers = {"query_or_table": "sql", "query_params": "json", "export_params": "json"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#ededed"
def __init__(
self,
*,
query_or_table: str,
key: str,
bucket_name: str | None = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
query_params: dict | None = None,
export_params: dict | None = None,
exasol_conn_id: str = "exasol_default",
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query_or_table = query_or_table
self.key = key
self.bucket_name = bucket_name
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
self.query_params = query_params
self.export_params = export_params
self.exasol_conn_id = exasol_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
exasol_hook = ExasolHook(exasol_conn_id=self.exasol_conn_id)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
with NamedTemporaryFile("w+") as file:
exasol_hook.export_to_file(
filename=file.name,
query_or_table=self.query_or_table,
export_params=self.export_params,
query_params=self.query_params,
)
file.flush()
self.log.info("Uploading the data as %s", self.key)
s3_hook.load_file(
filename=file.name,
key=self.key,
bucket_name=self.bucket_name,
replace=self.replace,
encrypt=self.encrypt,
gzip=self.gzip,
acl_policy=self.acl_policy,
)
self.log.info("Data uploaded")
return self.key
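# Illustrative sketch (not part of the original module): exporting a query result to S3 as
# a gzipped CSV. The query, key, bucket and export parameters are hypothetical.
#
#   exasol_to_s3 = ExasolToS3Operator(
#       task_id="exasol_to_s3",
#       query_or_table="SELECT * FROM retail.sales",
#       key="exports/sales.csv.gz",
#       bucket_name="my-bucket",
#       gzip=True,
#       export_params={"with_column_names": True},
#   )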
| 4,410 | 38.738739 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/s3_to_ftp.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ftp.hooks.ftp import FTPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToFTPOperator(BaseOperator):
"""
    This operator enables the transferring of files from S3 to an FTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToFTPOperator`
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket from
where the file is downloaded.
:param s3_key: The targeted s3 key. This is the specified file path for
downloading the file from S3.
:param ftp_path: The ftp remote path. This is the specified file path for
uploading file to the FTP server.
:param aws_conn_id: reference to a specific AWS connection
:param ftp_conn_id: The ftp connection id. The name or identifier for
establishing a connection to the FTP server.
"""
template_fields: Sequence[str] = ("s3_bucket", "s3_key", "ftp_path")
def __init__(
self,
*,
s3_bucket,
s3_key,
ftp_path,
aws_conn_id="aws_default",
ftp_conn_id="ftp_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.ftp_path = ftp_path
self.aws_conn_id = aws_conn_id
self.ftp_conn_id = ftp_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(self.aws_conn_id)
ftp_hook = FTPHook(ftp_conn_id=self.ftp_conn_id)
s3_obj = s3_hook.get_key(self.s3_key, self.s3_bucket)
with NamedTemporaryFile() as local_tmp_file:
self.log.info("Downloading file from %s", self.s3_key)
s3_obj.download_fileobj(local_tmp_file)
local_tmp_file.seek(0)
ftp_hook.store_file(self.ftp_path, local_tmp_file.name)
self.log.info("File stored in %s", {self.ftp_path})
| 2,966 | 35.62963 | 83 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/ftp_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ftp.hooks.ftp import FTPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class FTPToS3Operator(BaseOperator):
"""
Transfer of one or more files from an FTP server to S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FTPToS3Operator`
:param ftp_path: The ftp remote path. For one file it is mandatory to include the file as well.
For multiple files, it is the route where the files will be found.
:param s3_bucket: The targeted s3 bucket in which to upload the file(s).
:param s3_key: The targeted s3 key. For one file it must include the file path. For several,
it must end with "/".
:param ftp_filenames: Only used if you want to move multiple files. You can pass a list
        with exact filenames present in the ftp path, or a prefix that all files must match. It
can also be the string '*' for moving all the files within the ftp path.
    :param s3_filenames: Only used if you want to move multiple files and name them differently from
the originals from the ftp. It can be a list of filenames or file prefix (that will replace
the ftp prefix).
:param ftp_conn_id: The ftp connection id. The name or identifier for
establishing a connection to the FTP server.
:param aws_conn_id: The s3 connection id. The name or identifier for
establishing a connection to S3.
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param gzip: If True, the file will be compressed locally
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
"""
template_fields: Sequence[str] = ("ftp_path", "s3_bucket", "s3_key", "ftp_filenames", "s3_filenames")
def __init__(
self,
*,
ftp_path: str,
s3_bucket: str,
s3_key: str,
ftp_filenames: str | list[str] | None = None,
s3_filenames: str | list[str] | None = None,
ftp_conn_id: str = "ftp_default",
aws_conn_id: str = "aws_default",
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.ftp_path = ftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.ftp_filenames = ftp_filenames
self.s3_filenames = s3_filenames
self.aws_conn_id = aws_conn_id
self.ftp_conn_id = ftp_conn_id
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
self.s3_hook: S3Hook | None = None
self.ftp_hook: FTPHook | None = None
def __upload_to_s3_from_ftp(self, remote_filename, s3_file_key):
with NamedTemporaryFile() as local_tmp_file:
self.ftp_hook.retrieve_file(
remote_full_path=remote_filename, local_full_path_or_buffer=local_tmp_file.name
)
self.s3_hook.load_file(
filename=local_tmp_file.name,
key=s3_file_key,
bucket_name=self.s3_bucket,
replace=self.replace,
encrypt=self.encrypt,
gzip=self.gzip,
acl_policy=self.acl_policy,
)
self.log.info("File upload to %s", s3_file_key)
def execute(self, context: Context):
self.ftp_hook = FTPHook(ftp_conn_id=self.ftp_conn_id)
self.s3_hook = S3Hook(self.aws_conn_id)
if self.ftp_filenames:
if isinstance(self.ftp_filenames, str):
self.log.info("Getting files in %s", self.ftp_path)
list_dir = self.ftp_hook.list_directory(
path=self.ftp_path,
)
if self.ftp_filenames == "*":
files = list_dir
else:
ftp_filename: str = self.ftp_filenames
files = list(filter(lambda f: ftp_filename in f, list_dir))
for file in files:
self.log.info("Moving file %s", file)
if self.s3_filenames and isinstance(self.s3_filenames, str):
filename = file.replace(self.ftp_filenames, self.s3_filenames)
else:
filename = file
s3_file_key = f"{self.s3_key}{filename}"
self.__upload_to_s3_from_ftp(file, s3_file_key)
else:
if self.s3_filenames:
for ftp_file, s3_file in zip(self.ftp_filenames, self.s3_filenames):
self.__upload_to_s3_from_ftp(self.ftp_path + ftp_file, self.s3_key + s3_file)
else:
for ftp_file in self.ftp_filenames:
self.__upload_to_s3_from_ftp(self.ftp_path + ftp_file, self.s3_key + ftp_file)
else:
self.__upload_to_s3_from_ftp(self.ftp_path, self.s3_key)
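# Illustrative sketch (not part of the original module): moving every matching file under an
# FTP directory into an S3 prefix. The paths, filter and bucket are hypothetical.
#
#   ftp_to_s3 = FTPToS3Operator(
#       task_id="ftp_to_s3",
#       ftp_path="/outgoing/",
#       ftp_filenames=".csv",  # filename filter; use "*" to move every file
#       s3_bucket="my-bucket",
#       s3_key="landing/",  # must end with "/" when moving multiple files
#       replace=True,
#   )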
| 6,368 | 40.901316 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/s3_to_sftp.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from urllib.parse import urlsplit
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ssh.hooks.ssh import SSHHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToSFTPOperator(BaseOperator):
"""
    This operator enables the transferring of files from S3 to an SFTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToSFTPOperator`
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param sftp_path: The sftp remote path. This is the specified file path for
uploading file to the SFTP server.
:param aws_conn_id: aws connection to use
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket from
where the file is downloaded.
:param s3_key: The targeted s3 key. This is the specified file path for
downloading the file from S3.
"""
template_fields: Sequence[str] = ("s3_key", "sftp_path", "s3_bucket")
def __init__(
self,
*,
s3_bucket: str,
s3_key: str,
sftp_path: str,
sftp_conn_id: str = "ssh_default",
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sftp_conn_id = sftp_conn_id
self.sftp_path = sftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.aws_conn_id = aws_conn_id
@staticmethod
def get_s3_key(s3_key: str) -> str:
"""This parses the correct format for S3 keys regardless of how the S3 url is passed."""
parsed_s3_key = urlsplit(s3_key)
return parsed_s3_key.path.lstrip("/")
def execute(self, context: Context) -> None:
self.s3_key = self.get_s3_key(self.s3_key)
ssh_hook = SSHHook(ssh_conn_id=self.sftp_conn_id)
s3_hook = S3Hook(self.aws_conn_id)
s3_client = s3_hook.get_conn()
sftp_client = ssh_hook.get_conn().open_sftp()
with NamedTemporaryFile("w") as f:
s3_client.download_file(self.s3_bucket, self.s3_key, f.name)
sftp_client.put(f.name, self.sftp_path)
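# Illustrative sketch (not part of the original module): copying an S3 object onto an SFTP
# server. The bucket, key and remote path are hypothetical.
#
#   s3_to_sftp = S3ToSFTPOperator(
#       task_id="s3_to_sftp",
#       s3_bucket="my-bucket",
#       s3_key="outgoing/data.json",
#       sftp_path="/upload/data.json",
#       sftp_conn_id="ssh_default",
#   )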
| 3,191 | 35.689655 | 96 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/google_api_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module allows you to transfer data from any Google API endpoint into a S3 Bucket."""
from __future__ import annotations
import json
import sys
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.models.xcom import MAX_XCOM_SIZE, XCOM_RETURN_KEY
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.google.common.hooks.discovery_api import GoogleDiscoveryApiHook
if TYPE_CHECKING:
from airflow.models import TaskInstance
from airflow.serialization.pydantic.taskinstance import TaskInstancePydantic
from airflow.utils.context import Context
class GoogleApiToS3Operator(BaseOperator):
"""
Basic class for transferring data from a Google API endpoint into a S3 Bucket.
This discovery-based operator use
:class:`~airflow.providers.google.common.hooks.discovery_api.GoogleDiscoveryApiHook` to communicate
with Google Services via the
`Google API Python Client <https://github.com/googleapis/google-api-python-client>`__.
Please note that this library is in maintenance mode hence it won't fully support Google Cloud in
the future.
Therefore it is recommended that you use the custom Google Cloud Service Operators for working
with the Google Cloud Platform.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleApiToS3Operator`
:param google_api_service_name: The specific API service that is being requested.
:param google_api_service_version: The version of the API that is being requested.
:param google_api_endpoint_path: The client libraries path to the api call's executing method.
For example: 'analyticsreporting.reports.batchGet'
.. note:: See https://developers.google.com/apis-explorer
for more information on which methods are available.
:param google_api_endpoint_params: The params to control the corresponding endpoint result.
:param s3_destination_key: The url where to put the data retrieved from the endpoint in S3.
        .. note:: See https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html
for valid url formats.
:param google_api_response_via_xcom: Can be set to expose the google api response to xcom.
:param google_api_endpoint_params_via_xcom: If set to a value this value will be used as a key
for pulling from xcom and updating the google api endpoint params.
:param google_api_endpoint_params_via_xcom_task_ids: Task ids to filter xcom by.
:param google_api_pagination: If set to True Pagination will be enabled for this request
to retrieve all data.
.. note:: This means the response will be a list of responses.
:param google_api_num_retries: Define the number of retries for the Google API requests being made
if it fails.
:param s3_overwrite: Specifies whether the s3 file will be overwritten if exists.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param aws_conn_id: The connection id specifying the authentication information for the S3 Bucket.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"google_api_endpoint_params",
"s3_destination_key",
"google_impersonation_chain",
"gcp_conn_id",
)
template_ext: Sequence[str] = ()
ui_color = "#cc181e"
def __init__(
self,
*,
google_api_service_name: str,
google_api_service_version: str,
google_api_endpoint_path: str,
google_api_endpoint_params: dict,
s3_destination_key: str,
google_api_response_via_xcom: str | None = None,
google_api_endpoint_params_via_xcom: str | None = None,
google_api_endpoint_params_via_xcom_task_ids: str | None = None,
google_api_pagination: bool = False,
google_api_num_retries: int = 0,
s3_overwrite: bool = False,
gcp_conn_id: str = "google_cloud_default",
aws_conn_id: str = "aws_default",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.google_api_service_name = google_api_service_name
self.google_api_service_version = google_api_service_version
self.google_api_endpoint_path = google_api_endpoint_path
self.google_api_endpoint_params = google_api_endpoint_params
self.s3_destination_key = s3_destination_key
self.google_api_response_via_xcom = google_api_response_via_xcom
self.google_api_endpoint_params_via_xcom = google_api_endpoint_params_via_xcom
self.google_api_endpoint_params_via_xcom_task_ids = google_api_endpoint_params_via_xcom_task_ids
self.google_api_pagination = google_api_pagination
self.google_api_num_retries = google_api_num_retries
self.s3_overwrite = s3_overwrite
self.gcp_conn_id = gcp_conn_id
self.aws_conn_id = aws_conn_id
self.google_impersonation_chain = google_impersonation_chain
def execute(self, context: Context) -> None:
"""
Transfers Google APIs json data to S3.
:param context: The context that is being provided when executing.
"""
self.log.info("Transferring data from %s to s3", self.google_api_service_name)
if self.google_api_endpoint_params_via_xcom:
self._update_google_api_endpoint_params_via_xcom(context["task_instance"])
data = self._retrieve_data_from_google_api()
self._load_data_to_s3(data)
if self.google_api_response_via_xcom:
self._expose_google_api_response_via_xcom(context["task_instance"], data)
def _retrieve_data_from_google_api(self) -> dict:
google_discovery_api_hook = GoogleDiscoveryApiHook(
gcp_conn_id=self.gcp_conn_id,
api_service_name=self.google_api_service_name,
api_version=self.google_api_service_version,
impersonation_chain=self.google_impersonation_chain,
)
return google_discovery_api_hook.query(
endpoint=self.google_api_endpoint_path,
data=self.google_api_endpoint_params,
paginate=self.google_api_pagination,
num_retries=self.google_api_num_retries,
)
def _load_data_to_s3(self, data: dict) -> None:
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
s3_hook.load_string(
string_data=json.dumps(data),
bucket_name=S3Hook.parse_s3_url(self.s3_destination_key)[0],
key=S3Hook.parse_s3_url(self.s3_destination_key)[1],
replace=self.s3_overwrite,
)
def _update_google_api_endpoint_params_via_xcom(
self, task_instance: TaskInstance | TaskInstancePydantic
) -> None:
if self.google_api_endpoint_params_via_xcom:
google_api_endpoint_params = task_instance.xcom_pull(
task_ids=self.google_api_endpoint_params_via_xcom_task_ids,
key=self.google_api_endpoint_params_via_xcom,
)
self.google_api_endpoint_params.update(google_api_endpoint_params)
def _expose_google_api_response_via_xcom(
self, task_instance: TaskInstance | TaskInstancePydantic, data: dict
) -> None:
if sys.getsizeof(data) < MAX_XCOM_SIZE:
task_instance.xcom_push(key=self.google_api_response_via_xcom or XCOM_RETURN_KEY, value=data)
else:
raise RuntimeError("The size of the downloaded data is too large to push to XCom!")
| 9,042 | 45.613402 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/s3_to_sql.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Callable, Iterable, Sequence
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToSqlOperator(BaseOperator):
"""Load Data from S3 into a SQL Database.
You need to provide a parser function that takes a filename as an input
and returns an iterable of rows
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToSqlOperator`
:param schema: reference to a specific schema in SQL database
:param table: reference to a specific table in SQL database
:param s3_bucket: reference to a specific S3 bucket
:param s3_key: reference to a specific S3 key
:param sql_conn_id: reference to a specific SQL database. Must be of type DBApiHook
:param aws_conn_id: reference to a specific S3 / AWS connection
:param column_list: list of column names to use in the insert SQL.
:param commit_every: The maximum number of rows to insert in one
transaction. Set to `0` to insert all rows in one transaction.
:param parser: parser function that takes a filepath as input and returns an iterable.
e.g. to use a CSV parser that yields rows line-by-line, pass the following
function:
.. code-block:: python
def parse_csv(filepath):
import csv
with open(filepath, newline="") as file:
yield from csv.reader(file)
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"schema",
"table",
"column_list",
"sql_conn_id",
)
template_ext: Sequence[str] = ()
ui_color = "#f4a460"
def __init__(
self,
*,
s3_key: str,
s3_bucket: str,
table: str,
parser: Callable[[str], Iterable[Iterable]],
column_list: list[str] | None = None,
commit_every: int = 1000,
schema: str | None = None,
sql_conn_id: str = "sql_default",
aws_conn_id: str = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.table = table
self.schema = schema
self.aws_conn_id = aws_conn_id
self.sql_conn_id = sql_conn_id
self.column_list = column_list
self.commit_every = commit_every
self.parser = parser
def execute(self, context: Context) -> None:
self.log.info("Loading %s to SQL table %s...", self.s3_key, self.table)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
s3_obj = s3_hook.get_key(key=self.s3_key, bucket_name=self.s3_bucket)
with NamedTemporaryFile() as local_tempfile:
s3_obj.download_fileobj(local_tempfile)
local_tempfile.flush()
local_tempfile.seek(0)
self.db_hook.insert_rows(
table=self.table,
schema=self.schema,
target_fields=self.column_list,
rows=self.parser(local_tempfile.name),
commit_every=self.commit_every,
)
@cached_property
def db_hook(self):
self.log.debug("Get connection for %s", self.sql_conn_id)
hook = BaseHook.get_hook(self.sql_conn_id)
if not callable(getattr(hook, "insert_rows", None)):
raise AirflowException(
"This hook is not supported. The hook class must have an `insert_rows` method."
)
return hook
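# Illustrative sketch (not part of the original module): loading a CSV object from S3 into a
# SQL table with the line-by-line parser from the docstring above. The table, bucket and
# connection id are hypothetical.
#
#   def parse_csv(filepath):
#       import csv
#
#       with open(filepath, newline="") as file:
#           yield from csv.reader(file)
#
#   s3_to_sql = S3ToSqlOperator(
#       task_id="s3_to_sql",
#       s3_bucket="my-bucket",
#       s3_key="incoming/users.csv",
#       table="users",
#       column_list=["id", "name"],
#       parser=parse_csv,
#       sql_conn_id="postgres_default",  # any connection whose hook implements insert_rows
#   )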
| 4,634 | 34.930233 | 95 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/s3_to_redshift.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_data import RedshiftDataHook
from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.utils.redshift import build_credentials_block
if TYPE_CHECKING:
from airflow.utils.context import Context
AVAILABLE_METHODS = ["APPEND", "REPLACE", "UPSERT"]
class S3ToRedshiftOperator(BaseOperator):
"""
Executes an COPY command to load files from s3 to Redshift.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToRedshiftOperator`
:param schema: reference to a specific schema in redshift database
:param table: reference to a specific table in redshift database
:param s3_bucket: reference to a specific S3 bucket
:param s3_key: key prefix that selects single or multiple objects from S3
:param redshift_conn_id: reference to a specific redshift database OR a redshift data-api connection
:param aws_conn_id: reference to a specific S3 connection
If the AWS connection contains 'aws_iam_role' in ``extras``
the operator will use AWS STS credentials with a token
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param column_list: list of column names to load
:param copy_options: reference to a list of COPY options
:param method: Action to be performed on execution. Available ``APPEND``, ``UPSERT`` and ``REPLACE``.
:param upsert_keys: List of fields to use as key on upsert action
:param redshift_data_api_kwargs: If using the Redshift Data API instead of the SQL-based connection,
dict of arguments for the hook's ``execute_query`` method.
Cannot include any of these kwargs: ``{'sql', 'parameters'}``
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"schema",
"table",
"column_list",
"copy_options",
"redshift_conn_id",
"method",
)
template_ext: Sequence[str] = ()
ui_color = "#99e699"
def __init__(
self,
*,
schema: str,
table: str,
s3_bucket: str,
s3_key: str,
redshift_conn_id: str = "redshift_default",
aws_conn_id: str = "aws_default",
verify: bool | str | None = None,
column_list: list[str] | None = None,
copy_options: list | None = None,
autocommit: bool = False,
method: str = "APPEND",
upsert_keys: list[str] | None = None,
redshift_data_api_kwargs: dict = {},
**kwargs,
) -> None:
super().__init__(**kwargs)
self.schema = schema
self.table = table
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.redshift_conn_id = redshift_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.column_list = column_list
self.copy_options = copy_options or []
self.autocommit = autocommit
self.method = method
self.upsert_keys = upsert_keys
self.redshift_data_api_kwargs = redshift_data_api_kwargs
if self.redshift_data_api_kwargs:
for arg in ["sql", "parameters"]:
if arg in self.redshift_data_api_kwargs.keys():
raise AirflowException(f"Cannot include param '{arg}' in Redshift Data API kwargs")
def _build_copy_query(
self, copy_destination: str, credentials_block: str, region_info: str, copy_options: str
) -> str:
column_names = "(" + ", ".join(self.column_list) + ")" if self.column_list else ""
return f"""
COPY {copy_destination} {column_names}
FROM 's3://{self.s3_bucket}/{self.s3_key}'
credentials
'{credentials_block}'
{region_info}
{copy_options};
"""
def execute(self, context: Context) -> None:
if self.method not in AVAILABLE_METHODS:
raise AirflowException(f"Method not found! Available methods: {AVAILABLE_METHODS}")
redshift_hook: RedshiftDataHook | RedshiftSQLHook
if self.redshift_data_api_kwargs:
redshift_hook = RedshiftDataHook(aws_conn_id=self.redshift_conn_id)
else:
redshift_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
conn = S3Hook.get_connection(conn_id=self.aws_conn_id)
region_info = ""
if conn.extra_dejson.get("region", False):
region_info = f"region '{conn.extra_dejson['region']}'"
if conn.extra_dejson.get("role_arn", False):
credentials_block = f"aws_iam_role={conn.extra_dejson['role_arn']}"
else:
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
credentials = s3_hook.get_credentials()
credentials_block = build_credentials_block(credentials)
copy_options = "\n\t\t\t".join(self.copy_options)
destination = f"{self.schema}.{self.table}"
copy_destination = f"#{self.table}" if self.method == "UPSERT" else destination
copy_statement = self._build_copy_query(
copy_destination, credentials_block, region_info, copy_options
)
sql: str | Iterable[str]
if self.method == "REPLACE":
sql = ["BEGIN;", f"DELETE FROM {destination};", copy_statement, "COMMIT"]
elif self.method == "UPSERT":
if isinstance(redshift_hook, RedshiftDataHook):
keys = self.upsert_keys or redshift_hook.get_table_primary_key(
table=self.table, schema=self.schema, **self.redshift_data_api_kwargs
)
else:
keys = self.upsert_keys or redshift_hook.get_table_primary_key(self.table, self.schema)
if not keys:
raise AirflowException(
f"No primary key on {self.schema}.{self.table}. Please provide keys on 'upsert_keys'"
)
where_statement = " AND ".join([f"{self.table}.{k} = {copy_destination}.{k}" for k in keys])
sql = [
f"CREATE TABLE {copy_destination} (LIKE {destination} INCLUDING DEFAULTS);",
copy_statement,
"BEGIN;",
f"DELETE FROM {destination} USING {copy_destination} WHERE {where_statement};",
f"INSERT INTO {destination} SELECT * FROM {copy_destination};",
"COMMIT",
]
else:
sql = copy_statement
self.log.info("Executing COPY command...")
if isinstance(redshift_hook, RedshiftDataHook):
redshift_hook.execute_query(sql=sql, **self.redshift_data_api_kwargs)
else:
redshift_hook.run(sql, autocommit=self.autocommit)
self.log.info("COPY command complete...")
| 8,550 | 42.186869 | 106 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/sql_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import enum
from collections import namedtuple
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Iterable, Literal, Mapping, Sequence
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.common.sql.hooks.sql import DbApiHook
if TYPE_CHECKING:
from pandas import DataFrame
from airflow.utils.context import Context
class FILE_FORMAT(enum.Enum):
"""Possible file formats."""
CSV = enum.auto()
JSON = enum.auto()
PARQUET = enum.auto()
FileOptions = namedtuple("FileOptions", ["mode", "suffix", "function"])
FILE_OPTIONS_MAP = {
FILE_FORMAT.CSV: FileOptions("r+", ".csv", "to_csv"),
FILE_FORMAT.JSON: FileOptions("r+", ".json", "to_json"),
FILE_FORMAT.PARQUET: FileOptions("rb+", ".parquet", "to_parquet"),
}
class SqlToS3Operator(BaseOperator):
"""
Saves data from a specific SQL query into a file in S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SqlToS3Operator`
    :param query: the sql query to be executed. If you want to execute a file, pass the absolute
        path of the file, ending with a .sql extension. (templated)
:param s3_bucket: bucket where the data will be stored. (templated)
:param s3_key: desired key for the file. It includes the name of the file. (templated)
:param replace: whether or not to replace the file in S3 if it previously existed
:param sql_conn_id: reference to a specific database.
:param parameters: (optional) the parameters to render the SQL query with.
:param aws_conn_id: reference to a specific S3 connection
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param file_format: the destination file format, only string 'csv', 'json' or 'parquet' is accepted.
:param pd_kwargs: arguments to include in DataFrame ``.to_parquet()``, ``.to_json()`` or ``.to_csv()``.
:param groupby_kwargs: argument to include in DataFrame ``groupby()``.
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"query",
"sql_conn_id",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {
"query": "sql",
"pd_kwargs": "json",
}
def __init__(
self,
*,
query: str,
s3_bucket: str,
s3_key: str,
sql_conn_id: str,
parameters: None | Mapping | Iterable = None,
replace: bool = False,
aws_conn_id: str = "aws_default",
verify: bool | str | None = None,
file_format: Literal["csv", "json", "parquet"] = "csv",
pd_kwargs: dict | None = None,
groupby_kwargs: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query = query
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.sql_conn_id = sql_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.replace = replace
self.pd_kwargs = pd_kwargs or {}
self.parameters = parameters
self.groupby_kwargs = groupby_kwargs or {}
if "path_or_buf" in self.pd_kwargs:
raise AirflowException("The argument path_or_buf is not allowed, please remove it")
try:
self.file_format = FILE_FORMAT[file_format.upper()]
except KeyError:
raise AirflowException(f"The argument file_format doesn't support {file_format} value.")
@staticmethod
def _fix_dtypes(df: DataFrame, file_format: FILE_FORMAT) -> None:
"""
Mutate DataFrame to set dtypes for float columns containing NaN values.
Set dtype of object to str to allow for downstream transformations.
"""
try:
import numpy as np
from pandas import Float64Dtype, Int64Dtype
except ImportError as e:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(e)
for col in df:
if df[col].dtype.name == "object" and file_format == "parquet":
# if the type wasn't identified or converted, change it to a string so if can still be
# processed.
df[col] = df[col].astype(str)
if "float" in df[col].dtype.name and df[col].hasnans:
# inspect values to determine if dtype of non-null values is int or float
notna_series = df[col].dropna().values
if np.equal(notna_series, notna_series.astype(int)).all():
# set to dtype that retains integers and supports NaNs
# The type ignore can be removed here if https://github.com/numpy/numpy/pull/23690
# is merged and released as currently NumPy does not consider None as valid for x/y.
df[col] = np.where(df[col].isnull(), None, df[col]) # type: ignore[call-overload]
df[col] = df[col].astype(Int64Dtype())
elif np.isclose(notna_series, notna_series.astype(int)).all():
# set to float dtype that retains floats and supports NaNs
# The type ignore can be removed here if https://github.com/numpy/numpy/pull/23690
# is merged and released
df[col] = np.where(df[col].isnull(), None, df[col]) # type: ignore[call-overload]
df[col] = df[col].astype(Float64Dtype())
def execute(self, context: Context) -> None:
sql_hook = self._get_hook()
s3_conn = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
data_df = sql_hook.get_pandas_df(sql=self.query, parameters=self.parameters)
self.log.info("Data from SQL obtained")
self._fix_dtypes(data_df, self.file_format)
file_options = FILE_OPTIONS_MAP[self.file_format]
for group_name, df in self._partition_dataframe(df=data_df):
with NamedTemporaryFile(mode=file_options.mode, suffix=file_options.suffix) as tmp_file:
self.log.info("Writing data to temp file")
getattr(df, file_options.function)(tmp_file.name, **self.pd_kwargs)
self.log.info("Uploading data to S3")
object_key = f"{self.s3_key}_{group_name}" if group_name else self.s3_key
s3_conn.load_file(
filename=tmp_file.name, key=object_key, bucket_name=self.s3_bucket, replace=self.replace
)
def _partition_dataframe(self, df: DataFrame) -> Iterable[tuple[str, DataFrame]]:
"""Partition dataframe using pandas groupby() method."""
if not self.groupby_kwargs:
yield "", df
else:
grouped_df = df.groupby(**self.groupby_kwargs)
for group_label in grouped_df.groups.keys():
yield group_label, grouped_df.get_group(group_label).reset_index(drop=True)
def _get_hook(self) -> DbApiHook:
self.log.debug("Get connection for %s", self.sql_conn_id)
conn = BaseHook.get_connection(self.sql_conn_id)
hook = conn.get_hook()
if not callable(getattr(hook, "get_pandas_df", None)):
raise AirflowException(
"This hook is not supported. The hook class must have get_pandas_df method."
)
return hook
| 8,865 | 41.421053 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/transfers/azure_blob_to_s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import tempfile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureBlobStorageToS3Operator(BaseOperator):
"""
    Operator to transfer data from Azure Blob Storage to a specified bucket in Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AzureBlobStorageToS3Operator`
:param wasb_conn_id: Reference to the wasb connection.
:param container_name: Name of the container
:param prefix: Prefix string which filters objects whose name begin with
this prefix. (templated)
    :param delimiter: The delimiter by which you want to filter the objects. (templated)
        For example, to list only the CSV files in a directory you would use
        delimiter='.csv'.
:param aws_conn_id: Connection id of the S3 connection to use
:param dest_s3_key: The base S3 key to be used to store the files. (templated)
:param dest_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param dest_s3_extra_args: Extra arguments that may be passed to the download/upload operations.
    :param replace: Whether or not to verify the existence of the files in the
        destination bucket.
        By default it is set to False.
        If set to True, it will upload all the files, replacing the existing ones in
        the destination bucket.
        If set to False, it will upload only the files that are in the origin but not
        in the destination bucket.
:param s3_acl_policy: Optional The string to specify the canned ACL policy for the
object to be uploaded in S3
    :param wasb_extra_args: kwargs to pass to WasbHook
    :param s3_extra_args: kwargs to pass to S3Hook
"""
template_fields: Sequence[str] = (
"container_name",
"prefix",
"delimiter",
"dest_s3_key",
)
def __init__(
self,
*,
wasb_conn_id: str = "wasb_default",
container_name: str,
prefix: str | None = None,
delimiter: str = "",
aws_conn_id: str = "aws_default",
dest_s3_key: str,
dest_verify: str | bool | None = None,
dest_s3_extra_args: dict | None = None,
replace: bool = False,
s3_acl_policy: str | None = None,
wasb_extra_args: dict = {},
s3_extra_args: dict = {},
**kwargs,
) -> None:
super().__init__(**kwargs)
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.prefix = prefix
self.delimiter = delimiter
self.aws_conn_id = aws_conn_id
self.dest_s3_key = dest_s3_key
self.dest_verify = dest_verify
self.dest_s3_extra_args = dest_s3_extra_args or {}
self.replace = replace
self.s3_acl_policy = s3_acl_policy
self.wasb_extra_args = wasb_extra_args
self.s3_extra_args = s3_extra_args
def execute(self, context: Context) -> list[str]:
# list all files in the Azure Blob Storage container
wasb_hook = WasbHook(wasb_conn_id=self.wasb_conn_id, **self.wasb_extra_args)
s3_hook = S3Hook(
aws_conn_id=self.aws_conn_id,
verify=self.dest_verify,
extra_args=self.dest_s3_extra_args,
**self.s3_extra_args,
)
        self.log.info(
            "Getting list of the files in Container: %s; Prefix: %s; Delimiter: %s;",
            self.container_name,
            self.prefix,
            self.delimiter,
        )
files = wasb_hook.get_blobs_list_recursive(
container_name=self.container_name, prefix=self.prefix, endswith=self.delimiter
)
if not self.replace:
# if we are not replacing -> list all files in the S3 bucket
# and only keep those files which are present in
# Azure Blob Storage and not in S3
bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
# look for the bucket and the prefix to avoid look into
# parent directories/keys
existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
# in case that no files exists, return an empty array to avoid errors
existing_files = existing_files if existing_files is not None else []
# remove the prefix for the existing files to allow the match
existing_files = [file.replace(f"{prefix}/", "", 1) for file in existing_files]
files = list(set(files) - set(existing_files))
if files:
for file in files:
with tempfile.NamedTemporaryFile() as temp_file:
dest_key = os.path.join(self.dest_s3_key, file)
self.log.info("Downloading data from blob: %s", file)
wasb_hook.get_file(
file_path=temp_file.name,
container_name=self.container_name,
blob_name=file,
)
self.log.info("Uploading data to s3: %s", dest_key)
s3_hook.load_file(
filename=temp_file.name,
key=dest_key,
replace=self.replace,
acl_policy=self.s3_acl_policy,
)
self.log.info("All done, uploaded %d files to S3", len(files))
else:
self.log.info("All files are already in sync!")
return files
| 7,019 | 40.785714 | 100 | py |
airflow | airflow-main/airflow/providers/amazon/aws/notifications/chime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from airflow.exceptions import AirflowOptionalProviderFeatureException
from airflow.providers.amazon.aws.hooks.chime import ChimeWebhookHook
from airflow.utils.context import Context
try:
from airflow.notifications.basenotifier import BaseNotifier
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Failed to import BaseNotifier. This feature is only available in Airflow versions >= 2.6.0"
)
class ChimeNotifier(BaseNotifier):
"""
Chime notifier to send messages to a chime room via callbacks.
    :param chime_conn_id: The chime connection to use with Endpoint as "https://hooks.chime.aws" and
        the webhook token in the form of ``{webhook.id}?token={webhook.token}``
    :param message: The message to send to the chime room associated with the webhook.
"""
template_fields = ("message",)
def __init__(self, *, chime_conn_id: str, message: str = "This is the default chime notifier message"):
super().__init__()
self.chime_conn_id = chime_conn_id
self.message = message
@cached_property
def hook(self):
"""To reduce overhead cache the hook for the notifier."""
return ChimeWebhookHook(chime_conn_id=self.chime_conn_id)
def notify(self, context: Context) -> None:
"""Send a message to a Chime Chat Room."""
self.hook.send_message(message=self.message)
send_chime_notification = ChimeNotifier
| 2,319 | 36.419355 | 107 | py |
airflow | airflow-main/airflow/providers/amazon/aws/notifications/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/emr.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ast
import warnings
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from uuid import uuid4
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.emr import EmrContainerHook, EmrHook, EmrServerlessHook
from airflow.providers.amazon.aws.links.emr import EmrClusterLink, EmrLogsLink, get_log_uri
from airflow.providers.amazon.aws.triggers.emr import (
EmrAddStepsTrigger,
EmrContainerTrigger,
EmrCreateJobFlowTrigger,
EmrTerminateJobFlowTrigger,
)
from airflow.providers.amazon.aws.utils.waiter import waiter
from airflow.providers.amazon.aws.utils.waiter_with_logging import wait
from airflow.utils.helpers import exactly_one, prune_dict
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
from airflow.utils.context import Context
class EmrAddStepsOperator(BaseOperator):
"""
An operator that adds steps to an existing EMR job_flow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrAddStepsOperator`
:param job_flow_id: id of the JobFlow to add steps to. (templated)
    :param job_flow_name: name of the JobFlow to add steps to. Use as an alternative to passing
        job_flow_id. The operator will search for the id of a JobFlow with a matching name in one of
        the states in param cluster_states. Exactly one such cluster must exist or the task will fail.
        (templated)
:param cluster_states: Acceptable cluster states when searching for JobFlow id by job_flow_name.
(templated)
    :param aws_conn_id: aws connection to use
:param steps: boto3 style steps or reference to a steps file (must be '.json') to
be added to the jobflow. (templated)
    :param wait_for_completion: If True, the operator will wait for all the steps to be completed.
        (default: False)
    :param execution_role_arn: The ARN of the runtime role for a step on the cluster.
    :param do_xcom_push: if True, job_flow_id is pushed to XCom with key job_flow_id.
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = (
"job_flow_id",
"job_flow_name",
"cluster_states",
"steps",
"execution_role_arn",
)
template_ext: Sequence[str] = (".json",)
template_fields_renderers = {"steps": "json"}
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
job_flow_id: str | None = None,
job_flow_name: str | None = None,
cluster_states: list[str] | None = None,
aws_conn_id: str = "aws_default",
steps: list[dict] | str | None = None,
wait_for_completion: bool = False,
waiter_delay: int | None = None,
waiter_max_attempts: int | None = None,
execution_role_arn: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
if not exactly_one(job_flow_id is None, job_flow_name is None):
raise AirflowException("Exactly one of job_flow_id or job_flow_name must be specified.")
super().__init__(**kwargs)
cluster_states = cluster_states or []
steps = steps or []
self.aws_conn_id = aws_conn_id
self.job_flow_id = job_flow_id
self.job_flow_name = job_flow_name
self.cluster_states = cluster_states
self.steps = steps
self.wait_for_completion = False if deferrable else wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.execution_role_arn = execution_role_arn
self.deferrable = deferrable
def execute(self, context: Context) -> list[str]:
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
job_flow_id = self.job_flow_id or emr_hook.get_cluster_id_by_name(
str(self.job_flow_name), self.cluster_states
)
if not job_flow_id:
raise AirflowException(f"No cluster found for name: {self.job_flow_name}")
if self.do_xcom_push:
context["ti"].xcom_push(key="job_flow_id", value=job_flow_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=job_flow_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=self.job_flow_id,
log_uri=get_log_uri(emr_client=emr_hook.conn, job_flow_id=job_flow_id),
)
self.log.info("Adding steps to %s", job_flow_id)
# steps may arrive as a string representing a list
# e.g. if we used XCom or a file then: steps="[{ step1 }, { step2 }]"
steps = self.steps
if isinstance(steps, str):
steps = ast.literal_eval(steps)
step_ids = emr_hook.add_job_flow_steps(
job_flow_id=job_flow_id,
steps=steps,
wait_for_completion=self.wait_for_completion,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
execution_role_arn=self.execution_role_arn,
)
if self.deferrable:
self.defer(
trigger=EmrAddStepsTrigger(
job_flow_id=job_flow_id,
step_ids=step_ids,
aws_conn_id=self.aws_conn_id,
max_attempts=self.waiter_max_attempts,
poll_interval=self.waiter_delay,
),
method_name="execute_complete",
)
return step_ids
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error resuming cluster: {event}")
else:
self.log.info("Steps completed successfully")
return event["step_ids"]
class EmrStartNotebookExecutionOperator(BaseOperator):
"""
An operator that starts an EMR notebook execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrStartNotebookExecutionOperator`
:param editor_id: The unique identifier of the EMR notebook to use for notebook execution.
:param relative_path: The path and file name of the notebook file for this execution,
relative to the path specified for the EMR notebook.
:param cluster_id: The unique identifier of the EMR cluster the notebook is attached to.
:param service_role: The name or ARN of the IAM role that is used as the service role
for Amazon EMR (the EMR role) for the notebook execution.
:param notebook_execution_name: Optional name for the notebook execution.
:param notebook_params: Input parameters in JSON format passed to the EMR notebook at
runtime for execution.
    :param notebook_instance_security_group_id: The unique identifier of the Amazon EC2
        security group to associate with the EMR notebook for this notebook execution.
    :param master_instance_security_group_id: Optional unique ID of an EC2 security
        group to associate with the master instance of the EMR cluster for this notebook execution.
:param tags: Optional list of key value pair to associate with the notebook execution.
:param waiter_max_attempts: Maximum number of tries before failing.
:param waiter_delay: Number of seconds between polling the state of the notebook.
    :param waiter_countdown: Total amount of time the operator will wait for the notebook execution
        to start. Defaults to 25 * 60 seconds. (Deprecated. Please use waiter_max_attempts.)
:param waiter_check_interval_seconds: Number of seconds between polling the state of the notebook.
Defaults to 60 seconds. (Deprecated. Please use waiter_delay.)
"""
template_fields: Sequence[str] = (
"editor_id",
"cluster_id",
"relative_path",
"service_role",
"notebook_execution_name",
"notebook_params",
"notebook_instance_security_group_id",
"master_instance_security_group_id",
"tags",
"waiter_delay",
"waiter_max_attempts",
)
def __init__(
self,
editor_id: str,
relative_path: str,
cluster_id: str,
service_role: str,
notebook_execution_name: str | None = None,
notebook_params: str | None = None,
notebook_instance_security_group_id: str | None = None,
master_instance_security_group_id: str | None = None,
tags: list | None = None,
wait_for_completion: bool = False,
aws_conn_id: str = "aws_default",
# TODO: waiter_max_attempts and waiter_delay should default to None when the other two are deprecated.
waiter_max_attempts: int | None | ArgNotSet = NOTSET,
waiter_delay: int | None | ArgNotSet = NOTSET,
waiter_countdown: int = 25 * 60,
waiter_check_interval_seconds: int = 60,
**kwargs: Any,
):
if waiter_max_attempts is NOTSET:
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
waiter_max_attempts = waiter_countdown // waiter_check_interval_seconds
if waiter_delay is NOTSET:
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to "
"standardize naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
waiter_delay = waiter_check_interval_seconds
super().__init__(**kwargs)
self.editor_id = editor_id
self.relative_path = relative_path
self.service_role = service_role
self.notebook_execution_name = notebook_execution_name or f"emr_notebook_{uuid4()}"
self.notebook_params = notebook_params or ""
self.notebook_instance_security_group_id = notebook_instance_security_group_id or ""
self.tags = tags or []
self.wait_for_completion = wait_for_completion
self.cluster_id = cluster_id
self.aws_conn_id = aws_conn_id
self.waiter_max_attempts = waiter_max_attempts
self.waiter_delay = waiter_delay
self.master_instance_security_group_id = master_instance_security_group_id
def execute(self, context: Context):
execution_engine = {
"Id": self.cluster_id,
"Type": "EMR",
"MasterInstanceSecurityGroupId": self.master_instance_security_group_id or "",
}
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
response = emr_hook.conn.start_notebook_execution(
EditorId=self.editor_id,
RelativePath=self.relative_path,
NotebookExecutionName=self.notebook_execution_name,
NotebookParams=self.notebook_params,
ExecutionEngine=execution_engine,
ServiceRole=self.service_role,
NotebookInstanceSecurityGroupId=self.notebook_instance_security_group_id,
Tags=self.tags,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Starting notebook execution failed: {response}")
self.log.info("Notebook execution started: %s", response["NotebookExecutionId"])
notebook_execution_id = response["NotebookExecutionId"]
if self.wait_for_completion:
emr_hook.get_waiter("notebook_running").wait(
NotebookExecutionId=notebook_execution_id,
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
# The old Waiter method raised an exception if the notebook
# failed, adding that here. This could maybe be deprecated
# later to bring it in line with how other waiters behave.
failure_states = {"FAILED"}
final_status = emr_hook.conn.describe_notebook_execution(
NotebookExecutionId=notebook_execution_id
)["NotebookExecution"]["Status"]
if final_status in failure_states:
raise AirflowException(f"Notebook Execution reached failure state {final_status}.")
return notebook_execution_id
class EmrStopNotebookExecutionOperator(BaseOperator):
"""
An operator that stops a running EMR notebook execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrStopNotebookExecutionOperator`
:param notebook_execution_id: The unique identifier of the notebook execution.
:param wait_for_completion: If True, the operator will wait for the notebook.
to be in a STOPPED or FINISHED state. Defaults to False.
:param aws_conn_id: aws connection to use.
:param waiter_max_attempts: Maximum number of tries before failing.
:param waiter_delay: Number of seconds between polling the state of the notebook.
:param waiter_countdown: Total amount of time the operator will wait for the notebook to stop.
Defaults to 25 * 60 seconds. (Deprecated. Please use waiter_max_attempts.)
:param waiter_check_interval_seconds: Number of seconds between polling the state of the notebook.
Defaults to 60 seconds. (Deprecated. Please use waiter_delay.)
"""
template_fields: Sequence[str] = (
"notebook_execution_id",
"waiter_delay",
"waiter_max_attempts",
)
def __init__(
self,
notebook_execution_id: str,
wait_for_completion: bool = False,
aws_conn_id: str = "aws_default",
# TODO: waiter_max_attempts and waiter_delay should default to None when the other two are deprecated.
waiter_max_attempts: int | None | ArgNotSet = NOTSET,
waiter_delay: int | None | ArgNotSet = NOTSET,
waiter_countdown: int = 25 * 60,
waiter_check_interval_seconds: int = 60,
**kwargs: Any,
):
if waiter_max_attempts is NOTSET:
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
waiter_max_attempts = waiter_countdown // waiter_check_interval_seconds
if waiter_delay is NOTSET:
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to "
"standardize naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
waiter_delay = waiter_check_interval_seconds
super().__init__(**kwargs)
self.notebook_execution_id = notebook_execution_id
self.wait_for_completion = wait_for_completion
self.aws_conn_id = aws_conn_id
self.waiter_max_attempts = waiter_max_attempts
self.waiter_delay = waiter_delay
def execute(self, context: Context) -> None:
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
emr_hook.conn.stop_notebook_execution(NotebookExecutionId=self.notebook_execution_id)
if self.wait_for_completion:
emr_hook.get_waiter("notebook_stopped").wait(
NotebookExecutionId=self.notebook_execution_id,
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
class EmrEksCreateClusterOperator(BaseOperator):
"""
An operator that creates EMR on EKS virtual clusters.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrEksCreateClusterOperator`
:param virtual_cluster_name: The name of the EMR EKS virtual cluster to create.
:param eks_cluster_name: The EKS cluster used by the EMR virtual cluster.
:param eks_namespace: namespace used by the EKS cluster.
:param virtual_cluster_id: The EMR on EKS virtual cluster id.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param tags: The tags assigned to created cluster.
Defaults to None
"""
template_fields: Sequence[str] = (
"virtual_cluster_name",
"eks_cluster_name",
"eks_namespace",
)
ui_color = "#f9c915"
def __init__(
self,
*,
virtual_cluster_name: str,
eks_cluster_name: str,
eks_namespace: str,
virtual_cluster_id: str = "",
aws_conn_id: str = "aws_default",
tags: dict | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.virtual_cluster_name = virtual_cluster_name
self.eks_cluster_name = eks_cluster_name
self.eks_namespace = eks_namespace
self.virtual_cluster_id = virtual_cluster_id
self.aws_conn_id = aws_conn_id
self.tags = tags
@cached_property
def hook(self) -> EmrContainerHook:
"""Create and return an EmrContainerHook."""
return EmrContainerHook(self.aws_conn_id)
def execute(self, context: Context) -> str | None:
"""Create EMR on EKS virtual Cluster."""
self.virtual_cluster_id = self.hook.create_emr_on_eks_cluster(
self.virtual_cluster_name, self.eks_cluster_name, self.eks_namespace, self.tags
)
return self.virtual_cluster_id
class EmrContainerOperator(BaseOperator):
"""
An operator that submits jobs to EMR on EKS virtual clusters.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrContainerOperator`
:param name: The name of the job run.
:param virtual_cluster_id: The EMR on EKS virtual cluster ID
:param execution_role_arn: The IAM role ARN associated with the job run.
:param release_label: The Amazon EMR release version to use for the job run.
:param job_driver: Job configuration details, e.g. the Spark job parameters.
:param configuration_overrides: The configuration overrides for the job run,
specifically either application configuration or monitoring configuration.
:param client_request_token: The client idempotency token of the job run request.
Use this if you want to specify a unique ID to prevent two jobs from getting started.
If no token is provided, a UUIDv4 token will be generated for you.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param wait_for_completion: Whether or not to wait in the operator for the job to complete.
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check query status on EMR
:param max_tries: Deprecated - use max_polling_attempts instead.
:param max_polling_attempts: Maximum number of times to wait for the job run to finish.
Defaults to None, which will poll until the job is *not* in a pending, submitted, or running state.
:param tags: The tags assigned to job runs.
Defaults to None
:param deferrable: Run operator in the deferrable mode.
"""
template_fields: Sequence[str] = (
"name",
"virtual_cluster_id",
"execution_role_arn",
"release_label",
"job_driver",
"configuration_overrides",
)
ui_color = "#f9c915"
def __init__(
self,
*,
name: str,
virtual_cluster_id: str,
execution_role_arn: str,
release_label: str,
job_driver: dict,
configuration_overrides: dict | None = None,
client_request_token: str | None = None,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = True,
poll_interval: int = 30,
max_tries: int | None = None,
tags: dict | None = None,
max_polling_attempts: int | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.name = name
self.virtual_cluster_id = virtual_cluster_id
self.execution_role_arn = execution_role_arn
self.release_label = release_label
self.job_driver = job_driver
self.configuration_overrides = configuration_overrides or {}
self.aws_conn_id = aws_conn_id
self.client_request_token = client_request_token or str(uuid4())
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
self.max_polling_attempts = max_polling_attempts
self.tags = tags
self.job_id: str | None = None
self.deferrable = deferrable
if max_tries:
warnings.warn(
f"Parameter `{self.__class__.__name__}.max_tries` is deprecated and will be removed "
"in a future release. Please use method `max_polling_attempts` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if max_polling_attempts and max_polling_attempts != max_tries:
raise Exception("max_polling_attempts must be the same value as max_tries")
else:
self.max_polling_attempts = max_tries
@cached_property
def hook(self) -> EmrContainerHook:
"""Create and return an EmrContainerHook."""
return EmrContainerHook(
self.aws_conn_id,
virtual_cluster_id=self.virtual_cluster_id,
)
def execute(self, context: Context) -> str | None:
"""Run job on EMR Containers."""
self.job_id = self.hook.submit_job(
self.name,
self.execution_role_arn,
self.release_label,
self.job_driver,
self.configuration_overrides,
self.client_request_token,
self.tags,
)
if self.deferrable:
query_status = self.hook.check_query_status(job_id=self.job_id)
self.check_failure(query_status)
if query_status in EmrContainerHook.SUCCESS_STATES:
return self.job_id
timeout = (
timedelta(seconds=self.max_polling_attempts * self.poll_interval)
if self.max_polling_attempts
else self.execution_timeout
)
self.defer(
timeout=timeout,
trigger=EmrContainerTrigger(
virtual_cluster_id=self.virtual_cluster_id,
job_id=self.job_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.poll_interval,
),
method_name="execute_complete",
)
if self.wait_for_completion:
query_status = self.hook.poll_query_status(
self.job_id,
max_polling_attempts=self.max_polling_attempts,
poll_interval=self.poll_interval,
)
self.check_failure(query_status)
if not query_status or query_status in EmrContainerHook.INTERMEDIATE_STATES:
raise AirflowException(
f"Final state of EMR Containers job is {query_status}. "
f"Max tries of poll status exceeded, query_execution_id is {self.job_id}."
)
return self.job_id
def check_failure(self, query_status):
if query_status in EmrContainerHook.FAILURE_STATES:
error_message = self.hook.get_job_failure_reason(self.job_id)
raise AirflowException(
f"EMR Containers job failed. Final state is {query_status}. "
f"query_execution_id is {self.job_id}. Error: {error_message}"
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
self.log.info("%s", event["message"])
return event["job_id"]
def on_kill(self) -> None:
"""Cancel the submitted job run."""
if self.job_id:
self.log.info("Stopping job run with jobId - %s", self.job_id)
response = self.hook.stop_query(self.job_id)
http_status_code = None
try:
http_status_code = response["ResponseMetadata"]["HTTPStatusCode"]
except Exception as ex:
self.log.error("Exception while cancelling query: %s", ex)
finally:
if http_status_code is None or http_status_code != 200:
self.log.error("Unable to request query cancel on EMR. Exiting")
else:
self.log.info(
"Polling EMR for query with id %s to reach final state",
self.job_id,
)
self.hook.poll_query_status(self.job_id)
class EmrCreateJobFlowOperator(BaseOperator):
"""
Creates an EMR JobFlow, reading the config from the EMR connection.
A dictionary of JobFlow overrides can be passed that override the config from the connection.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrCreateJobFlowOperator`
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node)
:param emr_conn_id: :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`.
Use to receive an initial Amazon EMR cluster configuration:
``boto3.client('emr').run_job_flow`` request body.
If this is None or empty or the connection does not exist,
then an empty initial configuration is used.
:param job_flow_overrides: boto3 style arguments or reference to an arguments file
(must be '.json') to override specific ``emr_conn_id`` extra parameters. (templated)
    :param region_name: Region name passed to EmrHook
:param wait_for_completion: Whether to finish task immediately after creation (False) or wait for jobflow
completion (True)
:param waiter_max_attempts: Maximum number of tries before failing.
    :param waiter_delay: Number of seconds between polling the state of the job flow.
:param waiter_countdown: Max. seconds to wait for jobflow completion (only in combination with
wait_for_completion=True, None = no limit) (Deprecated. Please use waiter_max_attempts.)
:param waiter_check_interval_seconds: Number of seconds between polling the jobflow state. Defaults to 60
seconds. (Deprecated. Please use waiter_delay.)
    :param deferrable: If True, the operator will wait asynchronously for the job flow creation to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = (
"job_flow_overrides",
"waiter_delay",
"waiter_max_attempts",
)
template_ext: Sequence[str] = (".json",)
template_fields_renderers = {"job_flow_overrides": "json"}
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
aws_conn_id: str = "aws_default",
emr_conn_id: str | None = "emr_default",
job_flow_overrides: str | dict[str, Any] | None = None,
region_name: str | None = None,
wait_for_completion: bool = False,
# TODO: waiter_max_attempts and waiter_delay should default to None when the other two are deprecated.
waiter_max_attempts: int | None | ArgNotSet = NOTSET,
waiter_delay: int | None | ArgNotSet = NOTSET,
waiter_countdown: int | None = None,
waiter_check_interval_seconds: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
):
if waiter_max_attempts is NOTSET:
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
# waiter_countdown defaults to never timing out, which is not supported
# by boto waiters, so we will set it here to "a very long time" for now.
waiter_max_attempts = (waiter_countdown or 999) // waiter_check_interval_seconds
if waiter_delay is NOTSET:
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to "
"standardize naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
waiter_delay = waiter_check_interval_seconds
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.emr_conn_id = emr_conn_id
self.job_flow_overrides = job_flow_overrides or {}
self.region_name = region_name
self.wait_for_completion = wait_for_completion
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
self.deferrable = deferrable
@cached_property
def _emr_hook(self) -> EmrHook:
"""Create and return an EmrHook."""
return EmrHook(
aws_conn_id=self.aws_conn_id, emr_conn_id=self.emr_conn_id, region_name=self.region_name
)
def execute(self, context: Context) -> str | None:
self.log.info(
"Creating job flow using aws_conn_id: %s, emr_conn_id: %s", self.aws_conn_id, self.emr_conn_id
)
if isinstance(self.job_flow_overrides, str):
job_flow_overrides: dict[str, Any] = ast.literal_eval(self.job_flow_overrides)
self.job_flow_overrides = job_flow_overrides
else:
job_flow_overrides = self.job_flow_overrides
response = self._emr_hook.create_job_flow(job_flow_overrides)
if not response["ResponseMetadata"]["HTTPStatusCode"] == 200:
raise AirflowException(f"Job flow creation failed: {response}")
else:
self._job_flow_id = response["JobFlowId"]
self.log.info("Job flow with id %s created", self._job_flow_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=self._emr_hook.conn_region_name,
aws_partition=self._emr_hook.conn_partition,
job_flow_id=self._job_flow_id,
)
if self._job_flow_id:
EmrLogsLink.persist(
context=context,
operator=self,
region_name=self._emr_hook.conn_region_name,
aws_partition=self._emr_hook.conn_partition,
job_flow_id=self._job_flow_id,
log_uri=get_log_uri(emr_client=self._emr_hook.conn, job_flow_id=self._job_flow_id),
)
if self.deferrable:
self.defer(
trigger=EmrCreateJobFlowTrigger(
job_flow_id=self._job_flow_id,
aws_conn_id=self.aws_conn_id,
poll_interval=self.waiter_delay,
max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
if self.wait_for_completion:
self._emr_hook.get_waiter("job_flow_waiting").wait(
ClusterId=self._job_flow_id,
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
return self._job_flow_id
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error creating jobFlow: {event}")
else:
self.log.info("JobFlow created successfully")
return event["job_flow_id"]
def on_kill(self) -> None:
"""Terminate the EMR cluster (job flow) unless TerminationProtected is enabled on the cluster."""
if self._job_flow_id:
self.log.info("Terminating job flow %s", self._job_flow_id)
self._emr_hook.conn.terminate_job_flows(JobFlowIds=[self._job_flow_id])
class EmrModifyClusterOperator(BaseOperator):
"""
An operator that modifies an existing EMR cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrModifyClusterOperator`
:param cluster_id: cluster identifier
:param step_concurrency_level: Concurrency of the cluster
    :param aws_conn_id: aws connection to use
:param do_xcom_push: if True, cluster_id is pushed to XCom with key cluster_id.
"""
template_fields: Sequence[str] = ("cluster_id", "step_concurrency_level")
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self, *, cluster_id: str, step_concurrency_level: int, aws_conn_id: str = "aws_default", **kwargs
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.cluster_id = cluster_id
self.step_concurrency_level = step_concurrency_level
def execute(self, context: Context) -> int:
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
emr = emr_hook.get_conn()
if self.do_xcom_push:
context["ti"].xcom_push(key="cluster_id", value=self.cluster_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=self.cluster_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=self.cluster_id,
log_uri=get_log_uri(emr_client=emr_hook.conn, job_flow_id=self.cluster_id),
)
self.log.info("Modifying cluster %s", self.cluster_id)
response = emr.modify_cluster(
ClusterId=self.cluster_id, StepConcurrencyLevel=self.step_concurrency_level
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Modify cluster failed: {response}")
else:
self.log.info("Steps concurrency level %d", response["StepConcurrencyLevel"])
return response["StepConcurrencyLevel"]
class EmrTerminateJobFlowOperator(BaseOperator):
"""
Operator to terminate EMR JobFlows.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrTerminateJobFlowOperator`
:param job_flow_id: id of the JobFlow to terminate. (templated)
    :param aws_conn_id: aws connection to use
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check JobFlow status
:param waiter_max_attempts: The maximum number of times to poll for JobFlow status.
    :param deferrable: If True, the operator will wait asynchronously for the job flow termination to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = ("job_flow_id",)
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
job_flow_id: str,
aws_conn_id: str = "aws_default",
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.job_flow_id = job_flow_id
self.aws_conn_id = aws_conn_id
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> None:
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
emr = emr_hook.get_conn()
EmrClusterLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=self.job_flow_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=emr_hook.conn_region_name,
aws_partition=emr_hook.conn_partition,
job_flow_id=self.job_flow_id,
log_uri=get_log_uri(emr_client=emr, job_flow_id=self.job_flow_id),
)
self.log.info("Terminating JobFlow %s", self.job_flow_id)
response = emr.terminate_job_flows(JobFlowIds=[self.job_flow_id])
if not response["ResponseMetadata"]["HTTPStatusCode"] == 200:
raise AirflowException(f"JobFlow termination failed: {response}")
else:
self.log.info("Terminating JobFlow with id %s", self.job_flow_id)
if self.deferrable:
self.defer(
trigger=EmrTerminateJobFlowTrigger(
job_flow_id=self.job_flow_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error terminating JobFlow: {event}")
else:
self.log.info("Jobflow terminated successfully.")
return
class EmrServerlessCreateApplicationOperator(BaseOperator):
"""
Operator to create Serverless EMR Application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessCreateApplicationOperator`
:param release_label: The EMR release version associated with the application.
:param job_type: The type of application you want to start, such as Spark or Hive.
:param wait_for_completion: If true, wait for the Application to start before returning. Default to True.
If set to False, ``waiter_countdown`` and ``waiter_check_interval_seconds`` will only be applied when
waiting for the application to be in the ``CREATED`` state.
:param client_request_token: The client idempotency token of the application to create.
Its value must be unique for each request.
:param config: Optional dictionary for arbitrary parameters to the boto API create_application call.
:param aws_conn_id: AWS connection to use
:param waiter_countdown: (deprecated) Total amount of time, in seconds, the operator will wait for
the application to start. Defaults to 25 minutes.
:param waiter_check_interval_seconds: (deprecated) Number of seconds between polling the state
of the application. Defaults to 60 seconds.
    :param waiter_max_attempts: Number of times the waiter should poll the application to check the state.
If not set, the waiter will use its default value.
:param waiter_delay: Number of seconds between polling the state of the application.
"""
def __init__(
self,
release_label: str,
job_type: str,
client_request_token: str = "",
config: dict | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
waiter_countdown: int | ArgNotSet = NOTSET,
waiter_check_interval_seconds: int | ArgNotSet = NOTSET,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
**kwargs,
):
if waiter_check_interval_seconds is NOTSET:
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
else:
waiter_delay = waiter_check_interval_seconds if waiter_delay is NOTSET else waiter_delay
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to standardize "
"naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
if waiter_countdown is NOTSET:
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
else:
if waiter_max_attempts is NOTSET:
                # ignoring mypy because it doesn't like ArgNotSet as an operand, but neither variable
                # is of type ArgNotSet at this point.
waiter_max_attempts = waiter_countdown // waiter_delay # type: ignore[operator]
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
self.aws_conn_id = aws_conn_id
self.release_label = release_label
self.job_type = job_type
self.wait_for_completion = wait_for_completion
self.kwargs = kwargs
self.config = config or {}
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
super().__init__(**kwargs)
self.client_request_token = client_request_token or str(uuid4())
@cached_property
def hook(self) -> EmrServerlessHook:
"""Create and return an EmrServerlessHook."""
return EmrServerlessHook(aws_conn_id=self.aws_conn_id)
def execute(self, context: Context) -> str | None:
response = self.hook.conn.create_application(
clientToken=self.client_request_token,
releaseLabel=self.release_label,
type=self.job_type,
**self.config,
)
application_id = response["applicationId"]
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Application Creation failed: {response}")
self.log.info("EMR serverless application created: %s", application_id)
waiter = self.hook.get_waiter("serverless_app_created")
wait(
waiter=waiter,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
args={"applicationId": application_id},
failure_message="Serverless Application creation failed",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
self.log.info("Starting application %s", application_id)
self.hook.conn.start_application(applicationId=application_id)
if self.wait_for_completion:
waiter = self.hook.get_waiter("serverless_app_started")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": application_id},
failure_message="Serverless Application failed to start",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
return application_id
class EmrServerlessStartJobOperator(BaseOperator):
"""
Operator to start EMR Serverless job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessStartJobOperator`
:param application_id: ID of the EMR Serverless application to start.
:param execution_role_arn: ARN of role to perform action.
:param job_driver: Driver that the job runs on.
:param configuration_overrides: Configuration specifications to override existing configurations.
    :param client_request_token: The client idempotency token for the job run to start.
Its value must be unique for each request.
:param config: Optional dictionary for arbitrary parameters to the boto API start_job_run call.
    :param wait_for_completion: If true, waits for the job run to complete before returning. Defaults to True.
        If set to False, ``waiter_countdown`` and ``waiter_check_interval_seconds`` will only be applied
        when waiting for the application to be in the ``STARTED`` state.
:param aws_conn_id: AWS connection to use.
:param name: Name for the EMR Serverless job. If not provided, a default name will be assigned.
:param waiter_countdown: (deprecated) Total amount of time, in seconds, the operator will wait for
        the job to finish. Defaults to 25 minutes.
:param waiter_check_interval_seconds: (deprecated) Number of seconds between polling the state of the job.
Defaults to 60 seconds.
    :param waiter_max_attempts: Number of times the waiter should poll the application to check the state.
If not set, the waiter will use its default value.
:param waiter_delay: Number of seconds between polling the state of the job run.
"""
template_fields: Sequence[str] = (
"application_id",
"config",
"execution_role_arn",
"job_driver",
"configuration_overrides",
)
template_fields_renderers = {
"config": "json",
"configuration_overrides": "json",
}
def __init__(
self,
application_id: str,
execution_role_arn: str,
job_driver: dict,
configuration_overrides: dict | None,
client_request_token: str = "",
config: dict | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
name: str | None = None,
waiter_countdown: int | ArgNotSet = NOTSET,
waiter_check_interval_seconds: int | ArgNotSet = NOTSET,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
**kwargs,
):
if waiter_check_interval_seconds is NOTSET:
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
else:
waiter_delay = waiter_check_interval_seconds if waiter_delay is NOTSET else waiter_delay
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to standardize "
"naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
if waiter_countdown is NOTSET:
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
else:
if waiter_max_attempts is NOTSET:
                # ignoring mypy because it doesn't like ArgNotSet as an operand, but neither variable
                # is of type ArgNotSet at this point.
waiter_max_attempts = waiter_countdown // waiter_delay # type: ignore[operator]
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
self.aws_conn_id = aws_conn_id
self.application_id = application_id
self.execution_role_arn = execution_role_arn
self.job_driver = job_driver
self.configuration_overrides = configuration_overrides
self.wait_for_completion = wait_for_completion
self.config = config or {}
self.name = name or self.config.pop("name", f"emr_serverless_job_airflow_{uuid4()}")
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
self.job_id: str | None = None
super().__init__(**kwargs)
self.client_request_token = client_request_token or str(uuid4())
@cached_property
def hook(self) -> EmrServerlessHook:
"""Create and return an EmrServerlessHook."""
return EmrServerlessHook(aws_conn_id=self.aws_conn_id)
def execute(self, context: Context) -> str | None:
self.log.info("Starting job on Application: %s", self.application_id)
app_state = self.hook.conn.get_application(applicationId=self.application_id)["application"]["state"]
if app_state not in EmrServerlessHook.APPLICATION_SUCCESS_STATES:
self.hook.conn.start_application(applicationId=self.application_id)
waiter = self.hook.get_waiter("serverless_app_started")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": self.application_id},
failure_message="Serverless Application failed to start",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
response = self.hook.conn.start_job_run(
clientToken=self.client_request_token,
applicationId=self.application_id,
executionRoleArn=self.execution_role_arn,
jobDriver=self.job_driver,
configurationOverrides=self.configuration_overrides,
name=self.name,
**self.config,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"EMR serverless job failed to start: {response}")
self.job_id = response["jobRunId"]
self.log.info("EMR serverless job started: %s", self.job_id)
if self.wait_for_completion:
waiter = self.hook.get_waiter("serverless_job_completed")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": self.application_id, "jobRunId": self.job_id},
failure_message="Serverless Job failed",
status_message="Serverless Job status is",
status_args=["jobRun.state", "jobRun.stateDetails"],
)
return self.job_id
def on_kill(self) -> None:
"""Cancel the submitted job run."""
if self.job_id:
self.log.info("Stopping job run with jobId - %s", self.job_id)
response = self.hook.conn.cancel_job_run(applicationId=self.application_id, jobRunId=self.job_id)
http_status_code = (
response.get("ResponseMetadata", {}).get("HTTPStatusCode") if response else None
)
if http_status_code is None or http_status_code != 200:
self.log.error("Unable to request query cancel on EMR Serverless. Exiting")
return
self.log.info(
"Polling EMR Serverless for query with id %s to reach final state",
self.job_id,
)
# This should be replaced with a boto waiter when available.
waiter(
get_state_callable=self.hook.conn.get_job_run,
get_state_args={
"applicationId": self.application_id,
"jobRunId": self.job_id,
},
parse_response=["jobRun", "state"],
desired_state=EmrServerlessHook.JOB_TERMINAL_STATES,
failure_states=set(),
object_type="job",
action="cancelled",
countdown=self.waiter_delay * self.waiter_max_attempts,
check_interval_seconds=self.waiter_delay,
)
class EmrServerlessStopApplicationOperator(BaseOperator):
"""
Operator to stop an EMR Serverless application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessStopApplicationOperator`
:param application_id: ID of the EMR Serverless application to stop.
    :param wait_for_completion: If true, wait for the Application to stop before returning. Defaults to True.
:param aws_conn_id: AWS connection to use
:param waiter_countdown: Total amount of time, in seconds, the operator will wait for
        the application to be stopped. Defaults to 5 minutes.
:param waiter_check_interval_seconds: Number of seconds between polling the state of the application.
Defaults to 30 seconds.
:param force_stop: If set to True, any job for that app that is not in a terminal state will be cancelled.
Otherwise, trying to stop an app with running jobs will return an error.
If you want to wait for the jobs to finish gracefully, use
:class:`airflow.providers.amazon.aws.sensors.emr.EmrServerlessJobSensor`
"""
template_fields: Sequence[str] = ("application_id",)
def __init__(
self,
application_id: str,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
waiter_countdown: int | ArgNotSet = NOTSET,
waiter_check_interval_seconds: int | ArgNotSet = NOTSET,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
force_stop: bool = False,
**kwargs,
):
if waiter_check_interval_seconds is NOTSET:
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
else:
waiter_delay = waiter_check_interval_seconds if waiter_delay is NOTSET else waiter_delay
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to standardize "
"naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
if waiter_countdown is NOTSET:
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
else:
if waiter_max_attempts is NOTSET:
                # ignoring mypy because it doesn't like ArgNotSet as an operand, but neither variable
                # is of type ArgNotSet at this point.
waiter_max_attempts = waiter_countdown // waiter_delay # type: ignore[operator]
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
self.aws_conn_id = aws_conn_id
self.application_id = application_id
self.wait_for_completion = wait_for_completion
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
self.force_stop = force_stop
super().__init__(**kwargs)
@cached_property
def hook(self) -> EmrServerlessHook:
"""Create and return an EmrServerlessHook."""
return EmrServerlessHook(aws_conn_id=self.aws_conn_id)
def execute(self, context: Context) -> None:
self.log.info("Stopping application: %s", self.application_id)
if self.force_stop:
self.hook.cancel_running_jobs(
self.application_id,
waiter_config={
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
},
)
self.hook.conn.stop_application(applicationId=self.application_id)
if self.wait_for_completion:
waiter = self.hook.get_waiter("serverless_app_stopped")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": self.application_id},
failure_message="Error stopping application",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
self.log.info("EMR serverless application %s stopped successfully", self.application_id)
class EmrServerlessDeleteApplicationOperator(EmrServerlessStopApplicationOperator):
"""
    Operator to delete an EMR Serverless application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessDeleteApplicationOperator`
:param application_id: ID of the EMR Serverless application to delete.
:param wait_for_completion: If true, wait for the Application to be deleted before returning.
Defaults to True. Note that this operator will always wait for the application to be STOPPED first.
:param aws_conn_id: AWS connection to use
    :param waiter_countdown: Total amount of time, in seconds, the operator will wait for each step: first
        for the application to be stopped, and then for it to be deleted. Defaults to 25 minutes.
:param waiter_check_interval_seconds: Number of seconds between polling the state of the application.
Defaults to 60 seconds.
:param force_stop: If set to True, any job for that app that is not in a terminal state will be cancelled.
Otherwise, trying to delete an app with running jobs will return an error.
If you want to wait for the jobs to finish gracefully, use
:class:`airflow.providers.amazon.aws.sensors.emr.EmrServerlessJobSensor`
"""
template_fields: Sequence[str] = ("application_id",)
def __init__(
self,
application_id: str,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
waiter_countdown: int | ArgNotSet = NOTSET,
waiter_check_interval_seconds: int | ArgNotSet = NOTSET,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
force_stop: bool = False,
**kwargs,
):
if waiter_check_interval_seconds is NOTSET:
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
else:
waiter_delay = waiter_check_interval_seconds if waiter_delay is NOTSET else waiter_delay
warnings.warn(
"The parameter waiter_check_interval_seconds has been deprecated to standardize "
"naming conventions. Please use waiter_delay instead. In the "
"future this will default to None and defer to the waiter's default value."
)
if waiter_countdown is NOTSET:
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
else:
if waiter_max_attempts is NOTSET:
                # ignoring mypy because it doesn't like ArgNotSet as an operand, but neither variable
                # is of type ArgNotSet at this point.
waiter_max_attempts = waiter_countdown // waiter_delay # type: ignore[operator]
warnings.warn(
"The parameter waiter_countdown has been deprecated to standardize "
"naming conventions. Please use waiter_max_attempts instead. In the "
"future this will default to None and defer to the waiter's default value."
)
self.wait_for_delete_completion = wait_for_completion
# super stops the app
super().__init__(
application_id=application_id,
# when deleting an app, we always need to wait for it to stop before we can call delete()
wait_for_completion=True,
aws_conn_id=aws_conn_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
force_stop=force_stop,
**kwargs,
)
def execute(self, context: Context) -> None:
# super stops the app (or makes sure it's already stopped)
super().execute(context)
self.log.info("Now deleting application: %s", self.application_id)
response = self.hook.conn.delete_application(applicationId=self.application_id)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Application deletion failed: {response}")
if self.wait_for_delete_completion:
waiter = self.hook.get_waiter("serverless_app_terminated")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": self.application_id},
failure_message="Error terminating application",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
self.log.info("EMR serverless application deleted")
| 65,010 | 43.742602 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/glue.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os.path
import urllib.parse
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glue import GlueJobHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.links.glue import GlueJobRunDetailsLink
from airflow.providers.amazon.aws.triggers.glue import GlueJobCompleteTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlueJobOperator(BaseOperator):
"""Create an AWS Glue Job.
AWS Glue is a serverless Spark ETL service for running Spark Jobs on the AWS
cloud. Language support: Python and Scala.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlueJobOperator`
:param job_name: unique job name per AWS Account
:param script_location: location of ETL script. Must be a local or S3 path
:param job_desc: job description details
:param concurrent_run_limit: The maximum number of concurrent runs allowed for a job
:param script_args: etl script arguments and AWS Glue arguments (templated)
:param retry_limit: The maximum number of times to retry this job if it fails
:param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job.
:param region_name: aws region name (example: us-east-1)
:param s3_bucket: S3 bucket where logs and local etl script will be uploaded
:param iam_role_name: AWS IAM Role for Glue Job Execution
:param create_job_kwargs: Extra arguments for Glue Job Creation
:param run_job_kwargs: Extra arguments for Glue Job Run
:param wait_for_completion: Whether to wait for job run completion. (default: True)
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param verbose: If True, Glue Job Run logs show in the Airflow Task Logs. (default: False)
:param update_config: If True, Operator will update job configuration. (default: False)
:param stop_job_run_on_kill: If True, Operator will stop the job run when task is killed.
"""
template_fields: Sequence[str] = (
"job_name",
"script_location",
"script_args",
"create_job_kwargs",
"s3_bucket",
"iam_role_name",
)
template_ext: Sequence[str] = ()
template_fields_renderers = {
"script_args": "json",
"create_job_kwargs": "json",
}
ui_color = "#ededed"
operator_extra_links = (GlueJobRunDetailsLink(),)
def __init__(
self,
*,
job_name: str = "aws_glue_default_job",
job_desc: str = "AWS Glue Job with Airflow",
script_location: str | None = None,
concurrent_run_limit: int | None = None,
script_args: dict | None = None,
retry_limit: int = 0,
num_of_dpus: int | float | None = None,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
s3_bucket: str | None = None,
iam_role_name: str | None = None,
create_job_kwargs: dict | None = None,
run_job_kwargs: dict | None = None,
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
verbose: bool = False,
update_config: bool = False,
job_poll_interval: int | float = 6,
stop_job_run_on_kill: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.job_name = job_name
self.job_desc = job_desc
self.script_location = script_location
self.concurrent_run_limit = concurrent_run_limit or 1
self.script_args = script_args or {}
self.retry_limit = retry_limit
self.num_of_dpus = num_of_dpus
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.s3_bucket = s3_bucket
self.iam_role_name = iam_role_name
self.s3_protocol = "s3://"
self.s3_artifacts_prefix = "artifacts/glue-scripts/"
self.create_job_kwargs = create_job_kwargs
self.run_job_kwargs = run_job_kwargs or {}
self.wait_for_completion = wait_for_completion
self.verbose = verbose
self.update_config = update_config
self.deferrable = deferrable
self.job_poll_interval = job_poll_interval
self.stop_job_run_on_kill = stop_job_run_on_kill
self._job_run_id: str | None = None
@cached_property
def glue_job_hook(self) -> GlueJobHook:
if self.script_location is None:
s3_script_location = None
elif not self.script_location.startswith(self.s3_protocol):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
script_name = os.path.basename(self.script_location)
s3_hook.load_file(
self.script_location, self.s3_artifacts_prefix + script_name, bucket_name=self.s3_bucket
)
s3_script_location = f"s3://{self.s3_bucket}/{self.s3_artifacts_prefix}{script_name}"
else:
s3_script_location = self.script_location
return GlueJobHook(
job_name=self.job_name,
desc=self.job_desc,
concurrent_run_limit=self.concurrent_run_limit,
script_location=s3_script_location,
retry_limit=self.retry_limit,
num_of_dpus=self.num_of_dpus,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
s3_bucket=self.s3_bucket,
iam_role_name=self.iam_role_name,
create_job_kwargs=self.create_job_kwargs,
update_config=self.update_config,
job_poll_interval=self.job_poll_interval,
)
def execute(self, context: Context):
"""Execute AWS Glue Job from Airflow.
:return: the current Glue job ID.
"""
self.log.info(
"Initializing AWS Glue Job: %s. Wait for completion: %s",
self.job_name,
self.wait_for_completion,
)
glue_job_run = self.glue_job_hook.initialize_job(self.script_args, self.run_job_kwargs)
self._job_run_id = glue_job_run["JobRunId"]
glue_job_run_url = GlueJobRunDetailsLink.format_str.format(
aws_domain=GlueJobRunDetailsLink.get_aws_domain(self.glue_job_hook.conn_partition),
region_name=self.glue_job_hook.conn_region_name,
job_name=urllib.parse.quote(self.job_name, safe=""),
job_run_id=self._job_run_id,
)
GlueJobRunDetailsLink.persist(
context=context,
operator=self,
region_name=self.glue_job_hook.conn_region_name,
aws_partition=self.glue_job_hook.conn_partition,
job_name=urllib.parse.quote(self.job_name, safe=""),
job_run_id=self._job_run_id,
)
self.log.info("You can monitor this Glue Job run at: %s", glue_job_run_url)
if self.deferrable:
self.defer(
trigger=GlueJobCompleteTrigger(
job_name=self.job_name,
run_id=self._job_run_id,
verbose=self.verbose,
aws_conn_id=self.aws_conn_id,
job_poll_interval=self.job_poll_interval,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
glue_job_run = self.glue_job_hook.job_completion(self.job_name, self._job_run_id, self.verbose)
self.log.info(
"AWS Glue Job: %s status: %s. Run Id: %s",
self.job_name,
glue_job_run["JobRunState"],
self._job_run_id,
)
else:
self.log.info("AWS Glue Job: %s. Run Id: %s", self.job_name, self._job_run_id)
return self._job_run_id
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error in glue job: {event}")
return event["value"]
def on_kill(self):
"""Cancel the running AWS Glue Job."""
if self.stop_job_run_on_kill:
self.log.info("Stopping AWS Glue Job: %s. Run Id: %s", self.job_name, self._job_run_id)
response = self.glue_job_hook.conn.batch_stop_job_run(
JobName=self.job_name,
JobRunIds=[self._job_run_id],
)
if not response["SuccessfulSubmissions"]:
self.log.error("Failed to stop AWS Glue Job: %s. Run Id: %s", self.job_name, self._job_run_id)
| 9,663 | 41.385965 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/sagemaker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import json
import time
import warnings
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Sequence
from botocore.exceptions import ClientError
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.triggers.sagemaker import SageMakerTrigger
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.providers.amazon.aws.utils.sagemaker import ApprovalStatus
from airflow.providers.amazon.aws.utils.tags import format_tags
from airflow.utils.json import AirflowJsonEncoder
if TYPE_CHECKING:
from airflow.utils.context import Context
DEFAULT_CONN_ID: str = "aws_default"
CHECK_INTERVAL_SECOND: int = 30
def serialize(result: dict) -> str:
return json.loads(json.dumps(result, cls=AirflowJsonEncoder))
class SageMakerBaseOperator(BaseOperator):
"""This is the base operator for all SageMaker operators.
:param config: The configuration necessary to start a training job (templated)
"""
template_fields: Sequence[str] = ("config",)
template_ext: Sequence[str] = ()
template_fields_renderers: dict = {"config": "json"}
ui_color: str = "#ededed"
integer_fields: list[list[Any]] = []
def __init__(self, *, config: dict, aws_conn_id: str = DEFAULT_CONN_ID, **kwargs):
super().__init__(**kwargs)
self.config = config
self.aws_conn_id = aws_conn_id
def parse_integer(self, config: dict, field: list[str] | str) -> None:
"""Recursive method for parsing string fields holding integer values to integers."""
if len(field) == 1:
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
head = field[0]
if head in config:
config[head] = int(config[head])
return
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
(head, tail) = (field[0], field[1:])
if head in config:
self.parse_integer(config[head], tail)
return
def parse_config_integers(self) -> None:
"""Parse the integer fields to ints in case the config is rendered by Jinja and all fields are str."""
for field in self.integer_fields:
self.parse_integer(self.config, field)
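    # Illustrative sketch (not part of the original source): a subclass declaring
    #     integer_fields = [["ResourceConfig", "InstanceCount"]]
    # will have a Jinja-rendered string such as config["ResourceConfig"]["InstanceCount"] == "2"
    # cast to the integer 2 by parse_config_integers() before the config is sent to boto3.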
def expand_role(self) -> None:
"""Placeholder for calling boto3's `expand_role`, which expands an IAM role name into an ARN."""
def preprocess_config(self) -> None:
"""Process the config into a usable form."""
self._create_integer_fields()
self.log.info("Preprocessing the config and doing required s3_operations")
self.hook.configure_s3_resources(self.config)
self.parse_config_integers()
self.expand_role()
self.log.info(
"After preprocessing the config is:\n %s",
json.dumps(self.config, sort_keys=True, indent=4, separators=(",", ": ")),
)
def _create_integer_fields(self) -> None:
"""
Set fields which should be cast to integers.
Child classes should override this method if they need integer fields parsed.
"""
self.integer_fields = []
def _get_unique_job_name(
self, proposed_name: str, fail_if_exists: bool, describe_func: Callable[[str], Any]
) -> str:
"""
Returns the proposed name if it doesn't already exist, otherwise returns it with a timestamp suffix.
:param proposed_name: Base name.
:param fail_if_exists: Will throw an error if a job with that name already exists
instead of finding a new name.
:param describe_func: The `describe_` function for that kind of job.
We use it as an O(1) way to check if a job exists.
"""
job_name = proposed_name
while self._check_if_job_exists(job_name, describe_func):
# this while should loop only once in most cases, just setting it this way to regenerate a name
# in case there is collision.
if fail_if_exists:
raise AirflowException(f"A SageMaker job with name {job_name} already exists.")
else:
job_name = f"{proposed_name}-{time.time_ns()//1000000}"
self.log.info("Changed job name to '%s' to avoid collision.", job_name)
return job_name
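    # Illustrative sketch (not part of the original source): if "my-job" already exists and
    # fail_if_exists is False, the returned name becomes something like "my-job-1700000000000",
    # i.e. the proposed name plus a millisecond-timestamp suffix, so the new job does not collide.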
def _check_if_job_exists(self, job_name, describe_func: Callable[[str], Any]) -> bool:
"""Returns True if job exists, False otherwise."""
try:
describe_func(job_name)
self.log.info("Found existing job with name '%s'.", job_name)
return True
except ClientError as e:
if e.response["Error"]["Code"] == "ValidationException":
return False # ValidationException is thrown when the job could not be found
else:
raise e
def execute(self, context: Context):
raise NotImplementedError("Please implement execute() in sub class!")
@cached_property
def hook(self):
"""Return SageMakerHook."""
return SageMakerHook(aws_conn_id=self.aws_conn_id)
class SageMakerProcessingOperator(SageMakerBaseOperator):
"""
Use Amazon SageMaker Processing to analyze data and evaluate machine learning models on Amazon SageMaker.
With Processing, you can use a simplified, managed experience on SageMaker
to run your data processing workloads, such as feature engineering, data
validation, model evaluation, and model interpretation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerProcessingOperator`
:param config: The configuration necessary to start a processing job (templated).
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_processing_job`
:param aws_conn_id: The AWS connection ID to use.
    :param wait_for_completion: If set to True, the operator waits for the processing job to complete
        before returning.
:param print_log: if the operator should print the cloudwatch log during processing
    :param check_interval: if wait is set to True, this is the time interval, in seconds, at which
        the operator checks the status of the processing job
:param max_attempts: Number of times to poll for query state before returning the current state,
defaults to None.
:param max_ingestion_time: If wait is set to True, the operation fails if the processing job
doesn't finish within max_ingestion_time seconds. If you set this parameter to None,
the operation does not timeout.
:param action_if_job_exists: Behaviour if the job name already exists. Possible options are "timestamp"
(default), "increment" (deprecated) and "fail".
:param deferrable: Run operator in the deferrable mode. This is only effective if wait_for_completion is
set to True.
    :return Dict: Returns the ARN of the processing job created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_attempts: int | None = None,
max_ingestion_time: int | None = None,
action_if_job_exists: str = "timestamp",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
if action_if_job_exists not in ("increment", "fail", "timestamp"):
raise AirflowException(
f"Argument action_if_job_exists accepts only 'timestamp', 'increment' and 'fail'. \
Provided value: '{action_if_job_exists}'."
)
if action_if_job_exists == "increment":
warnings.warn(
"Action 'increment' on job name conflict has been deprecated for performance reasons."
"The alternative to 'fail' is now 'timestamp'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.action_if_job_exists = action_if_job_exists
self.wait_for_completion = wait_for_completion
self.print_log = print_log
self.check_interval = check_interval
self.max_attempts = max_attempts or 60
self.max_ingestion_time = max_ingestion_time
self.deferrable = deferrable
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str] | list[list[str]]] = [
["ProcessingResources", "ClusterConfig", "InstanceCount"],
["ProcessingResources", "ClusterConfig", "VolumeSizeInGB"],
]
if "StoppingCondition" in self.config:
self.integer_fields.append(["StoppingCondition", "MaxRuntimeInSeconds"])
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "RoleArn" in self.config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
self.config["RoleArn"] = hook.expand_role(self.config["RoleArn"])
def execute(self, context: Context) -> dict:
self.preprocess_config()
self.config["ProcessingJobName"] = self._get_unique_job_name(
self.config["ProcessingJobName"],
self.action_if_job_exists == "fail",
self.hook.describe_processing_job,
)
if self.deferrable and not self.wait_for_completion:
self.log.warning(
"Setting deferrable to True does not have effect when wait_for_completion is set to False."
)
wait_for_completion = self.wait_for_completion
if self.deferrable and self.wait_for_completion:
# Set wait_for_completion to False so that it waits for the status in the deferred task.
wait_for_completion = False
response = self.hook.create_processing_job(
self.config,
wait_for_completion=wait_for_completion,
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker Processing Job creation failed: {response}")
if self.deferrable and self.wait_for_completion:
self.defer(
timeout=self.execution_timeout,
trigger=SageMakerTrigger(
job_name=self.config["ProcessingJobName"],
job_type="Processing",
poke_interval=self.check_interval,
max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
return {"Processing": serialize(self.hook.describe_processing_job(self.config["ProcessingJobName"]))}
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
else:
self.log.info(event["message"])
return {"Processing": serialize(self.hook.describe_processing_job(self.config["ProcessingJobName"]))}
class SageMakerEndpointConfigOperator(SageMakerBaseOperator):
"""
Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models.
In the configuration, you identify one or more models, created using the CreateModel API, to deploy and
the resources that you want Amazon SageMaker to provision.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerEndpointConfigOperator`
:param config: The configuration necessary to create an endpoint config.
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_endpoint_config`
:param aws_conn_id: The AWS connection ID to use.
    :return Dict: Returns the ARN of the endpoint config created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str]] = [["ProductionVariants", "InitialInstanceCount"]]
def execute(self, context: Context) -> dict:
self.preprocess_config()
self.log.info("Creating SageMaker Endpoint Config %s.", self.config["EndpointConfigName"])
response = self.hook.create_endpoint_config(self.config)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker endpoint config creation failed: {response}")
else:
return {
"EndpointConfig": serialize(
self.hook.describe_endpoint_config(self.config["EndpointConfigName"])
)
}
class SageMakerEndpointOperator(SageMakerBaseOperator):
"""
When you create a serverless endpoint, SageMaker provisions and manages the compute resources for you.
Then, you can make inference requests to the endpoint and receive model predictions
in response. SageMaker scales the compute resources up and down as needed to handle
your request traffic.
Requires an Endpoint Config.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerEndpointOperator`
:param config:
The configuration necessary to create an endpoint.
If you need to create a SageMaker endpoint based on an existed
SageMaker model and an existed SageMaker endpoint config::
config = endpoint_configuration;
If you need to create all of SageMaker model, SageMaker endpoint-config and SageMaker endpoint::
config = {
'Model': model_configuration,
'EndpointConfig': endpoint_config_configuration,
'Endpoint': endpoint_configuration
}
For details of the configuration parameter of model_configuration see
:py:meth:`SageMaker.Client.create_model`
For details of the configuration parameter of endpoint_config_configuration see
:py:meth:`SageMaker.Client.create_endpoint_config`
For details of the configuration parameter of endpoint_configuration see
:py:meth:`SageMaker.Client.create_endpoint`
:param wait_for_completion: Whether the operator should wait until the endpoint creation finishes.
:param check_interval: If wait is set to True, this is the time interval, in seconds, that this operation
waits before polling the status of the endpoint creation.
:param max_ingestion_time: If wait is set to True, this operation fails if the endpoint creation doesn't
finish within max_ingestion_time seconds. If you set this parameter to None it never times out.
    :param operation: Whether to create an endpoint or update an endpoint. Must be either 'create' or 'update'.
:param aws_conn_id: The AWS connection ID to use.
:param deferrable: Will wait asynchronously for completion.
    :return Dict: Returns the ARN of the endpoint created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
wait_for_completion: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_ingestion_time: int | None = None,
operation: str = "create",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.max_ingestion_time = max_ingestion_time or 3600 * 10
self.operation = operation.lower()
if self.operation not in ["create", "update"]:
raise ValueError('Invalid value! Argument operation has to be one of "create" and "update"')
self.deferrable = deferrable
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
if "EndpointConfig" in self.config:
self.integer_fields: list[list[str]] = [
["EndpointConfig", "ProductionVariants", "InitialInstanceCount"]
]
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "Model" not in self.config:
return
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
config = self.config["Model"]
if "ExecutionRoleArn" in config:
config["ExecutionRoleArn"] = hook.expand_role(config["ExecutionRoleArn"])
def execute(self, context: Context) -> dict:
self.preprocess_config()
model_info = self.config.get("Model")
endpoint_config_info = self.config.get("EndpointConfig")
endpoint_info = self.config.get("Endpoint", self.config)
if model_info:
self.log.info("Creating SageMaker model %s.", model_info["ModelName"])
self.hook.create_model(model_info)
if endpoint_config_info:
self.log.info("Creating endpoint config %s.", endpoint_config_info["EndpointConfigName"])
self.hook.create_endpoint_config(endpoint_config_info)
if self.operation == "create":
sagemaker_operation = self.hook.create_endpoint
log_str = "Creating"
elif self.operation == "update":
sagemaker_operation = self.hook.update_endpoint
log_str = "Updating"
else:
raise ValueError('Invalid value! Argument operation has to be one of "create" and "update"')
self.log.info("%s SageMaker endpoint %s.", log_str, endpoint_info["EndpointName"])
try:
response = sagemaker_operation(
endpoint_info,
wait_for_completion=False,
)
# waiting for completion is handled here in the operator
except ClientError:
self.operation = "update"
sagemaker_operation = self.hook.update_endpoint
response = sagemaker_operation(
endpoint_info,
wait_for_completion=False,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker endpoint creation failed: {response}")
if self.deferrable:
self.defer(
trigger=SageMakerTrigger(
job_name=endpoint_info["EndpointName"],
job_type="endpoint",
poke_interval=self.check_interval,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
timeout=datetime.timedelta(seconds=self.max_ingestion_time),
)
elif self.wait_for_completion:
self.hook.get_waiter("endpoint_in_service").wait(
EndpointName=endpoint_info["EndpointName"],
WaiterConfig={"Delay": self.check_interval, "MaxAttempts": self.max_ingestion_time},
)
return {
"EndpointConfig": serialize(
self.hook.describe_endpoint_config(endpoint_info["EndpointConfigName"])
),
"Endpoint": serialize(self.hook.describe_endpoint(endpoint_info["EndpointName"])),
}
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
endpoint_info = self.config.get("Endpoint", self.config)
return {
"EndpointConfig": serialize(
self.hook.describe_endpoint_config(endpoint_info["EndpointConfigName"])
),
"Endpoint": serialize(self.hook.describe_endpoint(endpoint_info["EndpointName"])),
}
class SageMakerTransformOperator(SageMakerBaseOperator):
"""
Starts a transform job.
A transform job uses a trained model to get inferences on a dataset
and saves these results to an Amazon S3 location that you specify.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerTransformOperator`
:param config: The configuration necessary to start a transform job (templated).
If you need to create a SageMaker transform job based on an existed SageMaker model::
config = transform_config
If you need to create both SageMaker model and SageMaker Transform job::
config = {
'Model': model_config,
'Transform': transform_config
}
For details of the configuration parameter of transform_config see
:py:meth:`SageMaker.Client.create_transform_job`
For details of the configuration parameter of model_config, See:
:py:meth:`SageMaker.Client.create_model`
:param aws_conn_id: The AWS connection ID to use.
:param wait_for_completion: Set to True to wait until the transform job finishes.
:param check_interval: If wait is set to True, the time interval, in seconds,
that this operation waits to check the status of the transform job.
:param max_attempts: Number of times to poll for query state before returning the current state,
defaults to None.
:param max_ingestion_time: If wait is set to True, the operation fails
if the transform job doesn't finish within max_ingestion_time seconds. If you
set this parameter to None, the operation does not timeout.
:param check_if_job_exists: If set to true, then the operator will check whether a transform job
already exists for the name in the config.
:param action_if_job_exists: Behaviour if the job name already exists. Possible options are "timestamp"
(default), "increment" (deprecated) and "fail".
This is only relevant if check_if_job_exists is True.
    :return Dict: Returns the ARN of the model created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
wait_for_completion: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_attempts: int | None = None,
max_ingestion_time: int | None = None,
check_if_job_exists: bool = True,
action_if_job_exists: str = "timestamp",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.max_attempts = max_attempts or 60
self.max_ingestion_time = max_ingestion_time
self.check_if_job_exists = check_if_job_exists
if action_if_job_exists in ("increment", "fail", "timestamp"):
if action_if_job_exists == "increment":
warnings.warn(
"Action 'increment' on job name conflict has been deprecated for performance reasons."
"The alternative to 'fail' is now 'timestamp'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.action_if_job_exists = action_if_job_exists
else:
raise AirflowException(
f"Argument action_if_job_exists accepts only 'timestamp', 'increment' and 'fail'. \
Provided value: '{action_if_job_exists}'."
)
self.deferrable = deferrable
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str]] = [
["Transform", "TransformResources", "InstanceCount"],
["Transform", "MaxConcurrentTransforms"],
["Transform", "MaxPayloadInMB"],
]
if "Transform" not in self.config:
for field in self.integer_fields:
field.pop(0)
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "Model" not in self.config:
return
config = self.config["Model"]
if "ExecutionRoleArn" in config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
config["ExecutionRoleArn"] = hook.expand_role(config["ExecutionRoleArn"])
def execute(self, context: Context) -> dict:
self.preprocess_config()
transform_config = self.config.get("Transform", self.config)
if self.check_if_job_exists:
transform_config["TransformJobName"] = self._get_unique_job_name(
transform_config["TransformJobName"],
self.action_if_job_exists == "fail",
self.hook.describe_transform_job,
)
model_config = self.config.get("Model")
if model_config:
self.log.info("Creating SageMaker Model %s for transform job", model_config["ModelName"])
self.hook.create_model(model_config)
self.log.info("Creating SageMaker transform Job %s.", transform_config["TransformJobName"])
if self.deferrable and not self.wait_for_completion:
self.log.warning(
"Setting deferrable to True does not have effect when wait_for_completion is set to False."
)
wait_for_completion = self.wait_for_completion
if self.deferrable and self.wait_for_completion:
# Set wait_for_completion to False so that it waits for the status in the deferred task.
wait_for_completion = False
response = self.hook.create_transform_job(
transform_config,
wait_for_completion=wait_for_completion,
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker transform Job creation failed: {response}")
if self.deferrable and self.wait_for_completion:
self.defer(
timeout=self.execution_timeout,
trigger=SageMakerTrigger(
job_name=transform_config["TransformJobName"],
job_type="Transform",
poke_interval=self.check_interval,
max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
return {
"Model": serialize(self.hook.describe_model(transform_config["ModelName"])),
"Transform": serialize(self.hook.describe_transform_job(transform_config["TransformJobName"])),
}
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
else:
self.log.info(event["message"])
transform_config = self.config.get("Transform", self.config)
return {
"Model": serialize(self.hook.describe_model(transform_config["ModelName"])),
"Transform": serialize(self.hook.describe_transform_job(transform_config["TransformJobName"])),
}
class SageMakerTuningOperator(SageMakerBaseOperator):
"""
Starts a hyperparameter tuning job.
A hyperparameter tuning job finds the best version of a model by running
many training jobs on your dataset using the algorithm you choose and
values for hyperparameters within ranges that you specify. It then chooses
the hyperparameter values that result in a model that performs the best,
as measured by an objective metric that you choose.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerTuningOperator`
:param config: The configuration necessary to start a tuning job (templated).
For details of the configuration parameter see
:py:meth:`SageMaker.Client.create_hyper_parameter_tuning_job`
:param aws_conn_id: The AWS connection ID to use.
:param wait_for_completion: Set to True to wait until the tuning job finishes.
:param check_interval: If wait is set to True, the time interval, in seconds,
that this operation waits to check the status of the tuning job.
:param max_ingestion_time: If wait is set to True, the operation fails
if the tuning job doesn't finish within max_ingestion_time seconds. If you
set this parameter to None, the operation does not timeout.
:param deferrable: Will wait asynchronously for completion.
    :return Dict: Returns the ARN of the tuning job created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
wait_for_completion: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_ingestion_time: int | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.max_ingestion_time = max_ingestion_time
self.deferrable = deferrable
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "TrainingJobDefinition" in self.config:
config = self.config["TrainingJobDefinition"]
if "RoleArn" in config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
config["RoleArn"] = hook.expand_role(config["RoleArn"])
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str]] = [
["HyperParameterTuningJobConfig", "ResourceLimits", "MaxNumberOfTrainingJobs"],
["HyperParameterTuningJobConfig", "ResourceLimits", "MaxParallelTrainingJobs"],
["TrainingJobDefinition", "ResourceConfig", "InstanceCount"],
["TrainingJobDefinition", "ResourceConfig", "VolumeSizeInGB"],
["TrainingJobDefinition", "StoppingCondition", "MaxRuntimeInSeconds"],
]
def execute(self, context: Context) -> dict:
self.preprocess_config()
self.log.info(
"Creating SageMaker Hyper-Parameter Tuning Job %s", self.config["HyperParameterTuningJobName"]
)
response = self.hook.create_tuning_job(
self.config,
wait_for_completion=False, # we handle this here
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker Tuning Job creation failed: {response}")
if self.deferrable:
self.defer(
trigger=SageMakerTrigger(
job_name=self.config["HyperParameterTuningJobName"],
job_type="tuning",
poke_interval=self.check_interval,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
timeout=datetime.timedelta(seconds=self.max_ingestion_time)
if self.max_ingestion_time is not None
else None,
)
description = {} # never executed but makes static checkers happy
elif self.wait_for_completion:
description = self.hook.check_status(
self.config["HyperParameterTuningJobName"],
"HyperParameterTuningJobStatus",
self.hook.describe_tuning_job,
self.check_interval,
self.max_ingestion_time,
)
else:
description = self.hook.describe_tuning_job(self.config["HyperParameterTuningJobName"])
return {"Tuning": serialize(description)}
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
return {
"Tuning": serialize(self.hook.describe_tuning_job(self.config["HyperParameterTuningJobName"]))
}
class SageMakerModelOperator(SageMakerBaseOperator):
"""
Creates a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For the
primary container, you specify the Docker image that contains inference code,
artifacts (from prior training), and a custom environment map that the inference
code uses when you deploy the model for predictions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerModelOperator`
:param config: The configuration necessary to create a model.
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_model`
:param aws_conn_id: The AWS connection ID to use.
    :return Dict: Returns the ARN of the model created in Amazon SageMaker.
"""
def __init__(self, *, config: dict, aws_conn_id: str = DEFAULT_CONN_ID, **kwargs):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "ExecutionRoleArn" in self.config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
self.config["ExecutionRoleArn"] = hook.expand_role(self.config["ExecutionRoleArn"])
def execute(self, context: Context) -> dict:
self.preprocess_config()
self.log.info("Creating SageMaker Model %s.", self.config["ModelName"])
response = self.hook.create_model(self.config)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker model creation failed: {response}")
else:
return {"Model": serialize(self.hook.describe_model(self.config["ModelName"]))}
class SageMakerTrainingOperator(SageMakerBaseOperator):
"""
Starts a model training job.
After training completes, Amazon SageMaker saves the resulting
model artifacts to an Amazon S3 location that you specify.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerTrainingOperator`
:param config: The configuration necessary to start a training job (templated).
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_training_job`
:param aws_conn_id: The AWS connection ID to use.
    :param wait_for_completion: If set to True, the operator waits for the training job to complete
        before returning.
:param print_log: if the operator should print the cloudwatch log during training
    :param check_interval: if wait is set to True, this is the time interval, in seconds, at which
        the operator checks the status of the training job
:param max_attempts: Number of times to poll for query state before returning the current state,
defaults to None.
:param max_ingestion_time: If wait is set to True, the operation fails if the training job
doesn't finish within max_ingestion_time seconds. If you set this parameter to None,
the operation does not timeout.
:param check_if_job_exists: If set to true, then the operator will check whether a training job
already exists for the name in the config.
:param action_if_job_exists: Behaviour if the job name already exists. Possible options are "timestamp"
(default), "increment" (deprecated) and "fail".
This is only relevant if check_if_job_exists is True.
:param deferrable: Run operator in the deferrable mode. This is only effective if wait_for_completion is
set to True.
    :return Dict: Returns the ARN of the training job created in Amazon SageMaker.
"""
def __init__(
self,
*,
config: dict,
aws_conn_id: str = DEFAULT_CONN_ID,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = CHECK_INTERVAL_SECOND,
max_attempts: int | None = None,
max_ingestion_time: int | None = None,
check_if_job_exists: bool = True,
action_if_job_exists: str = "timestamp",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
self.wait_for_completion = wait_for_completion
self.print_log = print_log
self.check_interval = check_interval
self.max_attempts = max_attempts or 60
self.max_ingestion_time = max_ingestion_time
self.check_if_job_exists = check_if_job_exists
if action_if_job_exists in {"timestamp", "increment", "fail"}:
if action_if_job_exists == "increment":
warnings.warn(
"Action 'increment' on job name conflict has been deprecated for performance reasons."
"The alternative to 'fail' is now 'timestamp'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.action_if_job_exists = action_if_job_exists
else:
raise AirflowException(
f"Argument action_if_job_exists accepts only 'timestamp', 'increment' and 'fail'. \
Provided value: '{action_if_job_exists}'."
)
self.deferrable = deferrable
def expand_role(self) -> None:
"""Expands an IAM role name into an ARN."""
if "RoleArn" in self.config:
hook = AwsBaseHook(self.aws_conn_id, client_type="iam")
self.config["RoleArn"] = hook.expand_role(self.config["RoleArn"])
def _create_integer_fields(self) -> None:
"""Set fields which should be cast to integers."""
self.integer_fields: list[list[str]] = [
["ResourceConfig", "InstanceCount"],
["ResourceConfig", "VolumeSizeInGB"],
["StoppingCondition", "MaxRuntimeInSeconds"],
]
def execute(self, context: Context) -> dict:
self.preprocess_config()
if self.check_if_job_exists:
self.config["TrainingJobName"] = self._get_unique_job_name(
self.config["TrainingJobName"],
self.action_if_job_exists == "fail",
self.hook.describe_training_job,
)
self.log.info("Creating SageMaker training job %s.", self.config["TrainingJobName"])
if self.deferrable and not self.wait_for_completion:
self.log.warning(
"Setting deferrable to True does not have effect when wait_for_completion is set to False."
)
wait_for_completion = self.wait_for_completion
if self.deferrable and self.wait_for_completion:
# Set wait_for_completion to False so that it waits for the status in the deferred task.
wait_for_completion = False
response = self.hook.create_training_job(
self.config,
wait_for_completion=wait_for_completion,
print_log=self.print_log,
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Sagemaker Training Job creation failed: {response}")
if self.deferrable and self.wait_for_completion:
self.defer(
timeout=self.execution_timeout,
trigger=SageMakerTrigger(
job_name=self.config["TrainingJobName"],
job_type="Training",
poke_interval=self.check_interval,
max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
result = {"Training": serialize(self.hook.describe_training_job(self.config["TrainingJobName"]))}
return result
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
else:
self.log.info(event["message"])
result = {"Training": serialize(self.hook.describe_training_job(self.config["TrainingJobName"]))}
return result
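# Illustrative usage sketch (not part of the provider source): a minimal training-job configuration
# for this operator. Every name below (job name, image, role ARN, bucket paths, instance type) is a
# hypothetical placeholder; see SageMaker.Client.create_training_job for the full config schema.
def _example_training_task() -> SageMakerTrainingOperator:
    return SageMakerTrainingOperator(
        task_id="train_model",
        config={
            "TrainingJobName": "example-training-job",  # placeholder, must be unique per account
            "AlgorithmSpecification": {
                "TrainingImage": "123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest",
                "TrainingInputMode": "File",
            },
            "RoleArn": "arn:aws:iam::123456789012:role/sagemaker-role",  # placeholder
            "InputDataConfig": [
                {
                    "ChannelName": "train",
                    "DataSource": {
                        "S3DataSource": {
                            "S3DataType": "S3Prefix",
                            "S3Uri": "s3://example-bucket/train/",  # placeholder input prefix
                        }
                    },
                }
            ],
            "OutputDataConfig": {"S3OutputPath": "s3://example-bucket/output/"},  # placeholder
            "ResourceConfig": {
                "InstanceCount": 1,
                "InstanceType": "ml.m5.large",
                "VolumeSizeInGB": 30,
            },
            "StoppingCondition": {"MaxRuntimeInSeconds": 3600},
        },
        wait_for_completion=True,
        check_interval=60,
    )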
class SageMakerDeleteModelOperator(SageMakerBaseOperator):
"""
Deletes a SageMaker model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerDeleteModelOperator`
:param config: The configuration necessary to delete the model.
For details of the configuration parameter see :py:meth:`SageMaker.Client.delete_model`
:param aws_conn_id: The AWS connection ID to use.
"""
def __init__(self, *, config: dict, aws_conn_id: str = DEFAULT_CONN_ID, **kwargs):
super().__init__(config=config, aws_conn_id=aws_conn_id, **kwargs)
def execute(self, context: Context) -> Any:
sagemaker_hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
sagemaker_hook.delete_model(model_name=self.config["ModelName"])
self.log.info("Model %s deleted successfully.", self.config["ModelName"])
class SageMakerStartPipelineOperator(SageMakerBaseOperator):
"""
Starts a SageMaker pipeline execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerStartPipelineOperator`
:param config: The configuration to start the pipeline execution.
:param aws_conn_id: The AWS connection ID to use.
:param pipeline_name: Name of the pipeline to start.
:param display_name: The name this pipeline execution will have in the UI. Doesn't need to be unique.
:param pipeline_params: Optional parameters for the pipeline.
All parameters supplied need to already be present in the pipeline definition.
:param wait_for_completion: If true, this operator will only complete once the pipeline is complete.
:param check_interval: How long to wait between checks for pipeline status when waiting for completion.
:param verbose: Whether to print steps details when waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
    :return str: Returns the ARN of the pipeline execution created in Amazon SageMaker.
"""
template_fields: Sequence[str] = ("aws_conn_id", "pipeline_name", "display_name", "pipeline_params")
def __init__(
self,
*,
aws_conn_id: str = DEFAULT_CONN_ID,
pipeline_name: str,
display_name: str = "airflow-triggered-execution",
pipeline_params: dict | None = None,
wait_for_completion: bool = False,
check_interval: int = CHECK_INTERVAL_SECOND,
verbose: bool = True,
**kwargs,
):
super().__init__(config={}, aws_conn_id=aws_conn_id, **kwargs)
self.pipeline_name = pipeline_name
self.display_name = display_name
self.pipeline_params = pipeline_params
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.verbose = verbose
def execute(self, context: Context) -> str:
arn = self.hook.start_pipeline(
pipeline_name=self.pipeline_name,
display_name=self.display_name,
pipeline_params=self.pipeline_params,
wait_for_completion=self.wait_for_completion,
check_interval=self.check_interval,
verbose=self.verbose,
)
self.log.info(
"Starting a new execution for pipeline %s, running with ARN %s", self.pipeline_name, arn
)
return arn
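# Illustrative usage sketch (not part of the provider source): starting a pre-existing pipeline and
# waiting for it to finish. The pipeline name and parameter below are hypothetical placeholders;
# any parameter passed must already be declared in the pipeline definition.
def _example_start_pipeline_task() -> SageMakerStartPipelineOperator:
    return SageMakerStartPipelineOperator(
        task_id="start_pipeline",
        pipeline_name="example-pipeline",  # placeholder pipeline name
        pipeline_params={"InputDataUrl": "s3://example-bucket/input/"},  # placeholder parameter
        wait_for_completion=True,
        check_interval=60,
    )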
class SageMakerStopPipelineOperator(SageMakerBaseOperator):
"""
Stops a SageMaker pipeline execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerStopPipelineOperator`
:param config: The configuration to start the pipeline execution.
:param aws_conn_id: The AWS connection ID to use.
:param pipeline_exec_arn: Amazon Resource Name of the pipeline execution to stop.
:param wait_for_completion: If true, this operator will only complete once the pipeline is fully stopped.
:param check_interval: How long to wait between checks for pipeline status when waiting for completion.
:param verbose: Whether to print steps details when waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
:param fail_if_not_running: raises an exception if the pipeline stopped or succeeded before this was run
:return str: Returns the status of the pipeline execution after the operation has been done.
"""
template_fields: Sequence[str] = (
"aws_conn_id",
"pipeline_exec_arn",
)
def __init__(
self,
*,
aws_conn_id: str = DEFAULT_CONN_ID,
pipeline_exec_arn: str,
wait_for_completion: bool = False,
check_interval: int = CHECK_INTERVAL_SECOND,
verbose: bool = True,
fail_if_not_running: bool = False,
**kwargs,
):
super().__init__(config={}, aws_conn_id=aws_conn_id, **kwargs)
self.pipeline_exec_arn = pipeline_exec_arn
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.verbose = verbose
self.fail_if_not_running = fail_if_not_running
def execute(self, context: Context) -> str:
status = self.hook.stop_pipeline(
pipeline_exec_arn=self.pipeline_exec_arn,
wait_for_completion=self.wait_for_completion,
check_interval=self.check_interval,
verbose=self.verbose,
fail_if_not_running=self.fail_if_not_running,
)
self.log.info(
"Stop requested for pipeline execution with ARN %s. Status is now %s",
self.pipeline_exec_arn,
status,
)
return status
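# Illustrative usage sketch (not part of the provider source): the execution ARN would typically
# come from the XCom returned by SageMakerStartPipelineOperator; the literal ARN here is a
# placeholder, not a real resource.
def _example_stop_pipeline_task() -> SageMakerStopPipelineOperator:
    return SageMakerStopPipelineOperator(
        task_id="stop_pipeline",
        # In a DAG this field is templated, e.g. "{{ task_instance.xcom_pull('start_pipeline') }}"
        pipeline_exec_arn="arn:aws:sagemaker:us-east-1:123456789012:pipeline/example/execution/abc123",
        wait_for_completion=True,
    )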
class SageMakerRegisterModelVersionOperator(SageMakerBaseOperator):
"""
Register a SageMaker model by creating a model version that specifies the model group to which it belongs.
Will create the model group if it does not exist already.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerRegisterModelVersionOperator`
:param image_uri: The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.
:param model_url: The Amazon S3 path where the model artifacts (the trained weights of the model), which
result from model training, are stored. This path must point to a single gzip compressed tar archive
(.tar.gz suffix).
:param package_group_name: The name of the model package group that the model is going to be registered
to. Will be created if it doesn't already exist.
:param package_group_desc: Description of the model package group, if it was to be created (optional).
:param package_desc: Description of the model package (optional).
:param model_approval: Approval status of the model package. Defaults to PendingManualApproval
:param extras: Can contain extra parameters for the boto call to create_model_package, and/or overrides
for any parameter defined above. For a complete list of available parameters, see
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_model_package
:return str: Returns the ARN of the model package created.
"""
template_fields: Sequence[str] = (
"image_uri",
"model_url",
"package_group_name",
"package_group_desc",
"package_desc",
"model_approval",
)
def __init__(
self,
*,
image_uri: str,
model_url: str,
package_group_name: str,
package_group_desc: str = "",
package_desc: str = "",
model_approval: ApprovalStatus = ApprovalStatus.PENDING_MANUAL_APPROVAL,
extras: dict | None = None,
aws_conn_id: str = DEFAULT_CONN_ID,
config: dict | None = None,
**kwargs,
):
super().__init__(config=config or {}, aws_conn_id=aws_conn_id, **kwargs)
self.image_uri = image_uri
self.model_url = model_url
self.package_group_name = package_group_name
self.package_group_desc = package_group_desc
self.package_desc = package_desc
self.model_approval = model_approval
self.extras = extras
def execute(self, context: Context):
# create a model package group if it does not exist
group_created = self.hook.create_model_package_group(self.package_group_name, self.package_desc)
# then create a model package in that group
input_dict = {
"InferenceSpecification": {
"Containers": [
{
"Image": self.image_uri,
"ModelDataUrl": self.model_url,
}
],
"SupportedContentTypes": ["text/csv"],
"SupportedResponseMIMETypes": ["text/csv"],
},
"ModelPackageGroupName": self.package_group_name,
"ModelPackageDescription": self.package_desc,
"ModelApprovalStatus": self.model_approval.value,
}
if self.extras:
input_dict.update(self.extras) # overrides config above if keys are redefined in extras
try:
res = self.hook.conn.create_model_package(**input_dict)
return res["ModelPackageArn"]
except ClientError:
# rollback group creation if adding the model to it was not successful
if group_created:
self.hook.conn.delete_model_package_group(ModelPackageGroupName=self.package_group_name)
raise
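# Illustrative usage sketch (not part of the provider source): registering a model version from an
# inference image plus trained artifacts. All URIs and the group name are hypothetical placeholders;
# the approval status is left at its default (PendingManualApproval).
def _example_register_model_version_task() -> SageMakerRegisterModelVersionOperator:
    return SageMakerRegisterModelVersionOperator(
        task_id="register_model_version",
        image_uri="123456789012.dkr.ecr.us-east-1.amazonaws.com/example-inference:latest",  # placeholder
        model_url="s3://example-bucket/output/model.tar.gz",  # placeholder, must be a .tar.gz archive
        package_group_name="example-model-group",  # created automatically if it does not exist yet
        package_desc="Model version registered from an Airflow DAG",
    )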
class SageMakerAutoMLOperator(SageMakerBaseOperator):
"""
Creates an auto ML job, learning to predict the given column from the data provided through S3.
The learning output is written to the specified S3 location.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerAutoMLOperator`
:param job_name: Name of the job to create, needs to be unique within the account.
:param s3_input: The S3 location (folder or file) where to fetch the data.
By default, it expects csv with headers.
:param target_attribute: The name of the column containing the values to predict.
:param s3_output: The S3 folder where to write the model artifacts. Must be 128 characters or fewer.
:param role_arn: The ARN of the IAM role to use when interacting with S3.
Must have read access to the input, and write access to the output folder.
:param compressed_input: Set to True if the input is gzipped.
:param time_limit: The maximum amount of time in seconds to spend training the model(s).
:param autodeploy_endpoint_name: If specified, the best model will be deployed to an endpoint with
that name. No deployment made otherwise.
    :param extras: Use this dictionary to set any input variable for job creation that is not
offered through the parameters of this function. The format is described in:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_auto_ml_job
:param wait_for_completion: Whether to wait for the job to finish before returning. Defaults to True.
:param check_interval: Interval in seconds between 2 status checks when waiting for completion.
:returns: Only if waiting for completion, a dictionary detailing the best model. The structure is that of
the "BestCandidate" key in:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.describe_auto_ml_job
"""
template_fields: Sequence[str] = (
"job_name",
"s3_input",
"target_attribute",
"s3_output",
"role_arn",
"compressed_input",
"time_limit",
"autodeploy_endpoint_name",
"extras",
)
def __init__(
self,
*,
job_name: str,
s3_input: str,
target_attribute: str,
s3_output: str,
role_arn: str,
compressed_input: bool = False,
time_limit: int | None = None,
autodeploy_endpoint_name: str | None = None,
extras: dict | None = None,
wait_for_completion: bool = True,
check_interval: int = 30,
aws_conn_id: str = DEFAULT_CONN_ID,
config: dict | None = None,
**kwargs,
):
super().__init__(config=config or {}, aws_conn_id=aws_conn_id, **kwargs)
self.job_name = job_name
self.s3_input = s3_input
self.target_attribute = target_attribute
self.s3_output = s3_output
self.role_arn = role_arn
self.compressed_input = compressed_input
self.time_limit = time_limit
self.autodeploy_endpoint_name = autodeploy_endpoint_name
self.extras = extras
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
def execute(self, context: Context) -> dict | None:
best = self.hook.create_auto_ml_job(
self.job_name,
self.s3_input,
self.target_attribute,
self.s3_output,
self.role_arn,
self.compressed_input,
self.time_limit,
self.autodeploy_endpoint_name,
self.extras,
self.wait_for_completion,
self.check_interval,
)
return best
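# Illustrative usage sketch (not part of the provider source): an AutoML job predicting a "label"
# column from a CSV in S3. The job name, bucket paths, role ARN and target column are hypothetical
# placeholders.
def _example_automl_task() -> SageMakerAutoMLOperator:
    return SageMakerAutoMLOperator(
        task_id="auto_ml",
        job_name="example-automl-job",  # placeholder, must be unique within the account
        s3_input="s3://example-bucket/input/data.csv",  # placeholder, CSV with headers by default
        target_attribute="label",  # placeholder name of the column to predict
        s3_output="s3://example-bucket/automl-output/",  # placeholder output folder
        role_arn="arn:aws:iam::123456789012:role/sagemaker-role",  # placeholder
        time_limit=3600,  # stop training after one hour
        wait_for_completion=True,
    )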
class SageMakerCreateExperimentOperator(SageMakerBaseOperator):
"""
Creates a SageMaker experiment, to be then associated to jobs etc.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerCreateExperimentOperator`
:param name: name of the experiment, must be unique within the AWS account
:param description: description of the experiment, optional
:param tags: tags to attach to the experiment, optional
:param aws_conn_id: The AWS connection ID to use.
:returns: the ARN of the experiment created, though experiments are referred to by name
"""
template_fields: Sequence[str] = (
"name",
"description",
"tags",
)
def __init__(
self,
*,
name: str,
description: str | None = None,
tags: dict | None = None,
aws_conn_id: str = DEFAULT_CONN_ID,
**kwargs,
):
super().__init__(config={}, aws_conn_id=aws_conn_id, **kwargs)
self.name = name
self.description = description
self.tags = tags or {}
def execute(self, context: Context) -> str:
sagemaker_hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
params = {
"ExperimentName": self.name,
"Description": self.description,
"Tags": format_tags(self.tags),
}
ans = sagemaker_hook.conn.create_experiment(**trim_none_values(params))
arn = ans["ExperimentArn"]
self.log.info("Experiment %s created successfully with ARN %s.", self.name, arn)
return arn
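# Illustrative usage sketch (not part of the provider source): creating an experiment that later
# jobs can be associated with. The experiment name and tag below are hypothetical placeholders.
def _example_create_experiment_task() -> SageMakerCreateExperimentOperator:
    return SageMakerCreateExperimentOperator(
        task_id="create_experiment",
        name="example-experiment",  # placeholder, must be unique within the AWS account
        description="Experiment created from an Airflow DAG",
        tags={"team": "data-science"},  # placeholder tag
    )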
| 58,086 | 42.608859 | 135 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/redshift_cluster.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_cluster import RedshiftHook
from airflow.providers.amazon.aws.triggers.redshift_cluster import (
RedshiftCreateClusterSnapshotTrigger,
RedshiftCreateClusterTrigger,
RedshiftDeleteClusterTrigger,
RedshiftPauseClusterTrigger,
RedshiftResumeClusterTrigger,
)
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedshiftCreateClusterOperator(BaseOperator):
"""Creates a new cluster with the specified parameters.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftCreateClusterOperator`
:param cluster_identifier: A unique identifier for the cluster.
:param node_type: The node type to be provisioned for the cluster.
Valid Values: ``ds2.xlarge``, ``ds2.8xlarge``, ``dc1.large``,
``dc1.8xlarge``, ``dc2.large``, ``dc2.8xlarge``, ``ra3.xlplus``,
``ra3.4xlarge``, and ``ra3.16xlarge``.
:param master_username: The username associated with the admin user account for
the cluster that is being created.
:param master_user_password: The password associated with the admin user account for
the cluster that is being created.
:param cluster_type: The type of the cluster ``single-node`` or ``multi-node``.
The default value is ``multi-node``.
:param db_name: The name of the first database to be created when the cluster is created.
:param number_of_nodes: The number of compute nodes in the cluster.
This param require when ``cluster_type`` is ``multi-node``.
:param cluster_security_groups: A list of security groups to be associated with this cluster.
:param vpc_security_group_ids: A list of VPC security groups to be associated with the cluster.
:param cluster_subnet_group_name: The name of a cluster subnet group to be associated with this cluster.
:param availability_zone: The EC2 Availability Zone (AZ).
:param preferred_maintenance_window: The time range (in UTC) during which automated cluster
maintenance can occur.
:param cluster_parameter_group_name: The name of the parameter group to be associated with this cluster.
:param automated_snapshot_retention_period: The number of days that automated snapshots are retained.
The default value is ``1``.
:param manual_snapshot_retention_period: The default number of days to retain a manual snapshot.
:param port: The port number on which the cluster accepts incoming connections.
        The default value is ``5439``.
    :param cluster_version: The version of a Redshift engine software that you want to deploy on the cluster.
    :param allow_version_upgrade: Whether major version upgrades can be applied during the maintenance window.
        The default value is ``True``.
    :param publicly_accessible: Whether the cluster can be accessed from a public network.
    :param encrypted: Whether data in the cluster is encrypted at rest.
        The default value is ``False``.
    :param hsm_client_certificate_identifier: Name of the HSM client certificate
        the Amazon Redshift cluster uses to retrieve the data.
    :param hsm_configuration_identifier: Name of the HSM configuration.
    :param elastic_ip: The Elastic IP (EIP) address for the cluster.
    :param tags: A list of tag instances.
    :param kms_key_id: KMS key ID of the encryption key.
:param enhanced_vpc_routing: Whether to create the cluster with enhanced VPC routing enabled
Default value is ``False``.
:param additional_info: Reserved
:param iam_roles: A list of IAM roles that can be used by the cluster to access other AWS services.
:param maintenance_track_name: Name of the maintenance track for the cluster.
:param snapshot_schedule_identifier: A unique identifier for the snapshot schedule.
:param availability_zone_relocation: Enable relocation for a Redshift cluster
between Availability Zones after the cluster is created.
    :param aqua_configuration_status: Whether the cluster is configured to use AQUA (Advanced Query Accelerator).
    :param default_iam_role_arn: ARN for the IAM role.
    :param aws_conn_id: The Airflow connection used for AWS credentials.
The default connection id is ``aws_default``.
    :param wait_for_completion: Whether to wait for the cluster to be in the ``available`` state
:param max_attempt: The maximum number of attempts to be made. Default: 5
:param poll_interval: The amount of time in seconds to wait between attempts. Default: 60
:param deferrable: If True, the operator will run in deferrable mode
"""
template_fields: Sequence[str] = (
"cluster_identifier",
"cluster_type",
"node_type",
"number_of_nodes",
"vpc_security_group_ids",
)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
cluster_identifier: str,
node_type: str,
master_username: str,
master_user_password: str,
cluster_type: str = "multi-node",
db_name: str = "dev",
number_of_nodes: int = 1,
cluster_security_groups: list[str] | None = None,
vpc_security_group_ids: list[str] | None = None,
cluster_subnet_group_name: str | None = None,
availability_zone: str | None = None,
preferred_maintenance_window: str | None = None,
cluster_parameter_group_name: str | None = None,
automated_snapshot_retention_period: int = 1,
manual_snapshot_retention_period: int | None = None,
port: int = 5439,
cluster_version: str = "1.0",
allow_version_upgrade: bool = True,
publicly_accessible: bool = True,
encrypted: bool = False,
hsm_client_certificate_identifier: str | None = None,
hsm_configuration_identifier: str | None = None,
elastic_ip: str | None = None,
tags: list[Any] | None = None,
kms_key_id: str | None = None,
enhanced_vpc_routing: bool = False,
additional_info: str | None = None,
iam_roles: list[str] | None = None,
maintenance_track_name: str | None = None,
snapshot_schedule_identifier: str | None = None,
availability_zone_relocation: bool | None = None,
aqua_configuration_status: str | None = None,
default_iam_role_arn: str | None = None,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = False,
max_attempt: int = 5,
poll_interval: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.node_type = node_type
self.master_username = master_username
self.master_user_password = master_user_password
self.cluster_type = cluster_type
self.db_name = db_name
self.number_of_nodes = number_of_nodes
self.cluster_security_groups = cluster_security_groups
self.vpc_security_group_ids = vpc_security_group_ids
self.cluster_subnet_group_name = cluster_subnet_group_name
self.availability_zone = availability_zone
self.preferred_maintenance_window = preferred_maintenance_window
self.cluster_parameter_group_name = cluster_parameter_group_name
self.automated_snapshot_retention_period = automated_snapshot_retention_period
self.manual_snapshot_retention_period = manual_snapshot_retention_period
self.port = port
self.cluster_version = cluster_version
self.allow_version_upgrade = allow_version_upgrade
self.publicly_accessible = publicly_accessible
self.encrypted = encrypted
self.hsm_client_certificate_identifier = hsm_client_certificate_identifier
self.hsm_configuration_identifier = hsm_configuration_identifier
self.elastic_ip = elastic_ip
self.tags = tags
self.kms_key_id = kms_key_id
self.enhanced_vpc_routing = enhanced_vpc_routing
self.additional_info = additional_info
self.iam_roles = iam_roles
self.maintenance_track_name = maintenance_track_name
self.snapshot_schedule_identifier = snapshot_schedule_identifier
self.availability_zone_relocation = availability_zone_relocation
self.aqua_configuration_status = aqua_configuration_status
self.default_iam_role_arn = default_iam_role_arn
self.aws_conn_id = aws_conn_id
self.wait_for_completion = wait_for_completion
self.max_attempt = max_attempt
self.poll_interval = poll_interval
self.deferrable = deferrable
self.kwargs = kwargs
def execute(self, context: Context):
redshift_hook = RedshiftHook(aws_conn_id=self.aws_conn_id)
self.log.info("Creating Redshift cluster %s", self.cluster_identifier)
params: dict[str, Any] = {}
if self.db_name:
params["DBName"] = self.db_name
if self.cluster_type:
params["ClusterType"] = self.cluster_type
if self.cluster_type == "multi-node":
params["NumberOfNodes"] = self.number_of_nodes
if self.cluster_security_groups:
params["ClusterSecurityGroups"] = self.cluster_security_groups
if self.vpc_security_group_ids:
params["VpcSecurityGroupIds"] = self.vpc_security_group_ids
if self.cluster_subnet_group_name:
params["ClusterSubnetGroupName"] = self.cluster_subnet_group_name
if self.availability_zone:
params["AvailabilityZone"] = self.availability_zone
if self.preferred_maintenance_window:
params["PreferredMaintenanceWindow"] = self.preferred_maintenance_window
if self.cluster_parameter_group_name:
params["ClusterParameterGroupName"] = self.cluster_parameter_group_name
if self.automated_snapshot_retention_period:
params["AutomatedSnapshotRetentionPeriod"] = self.automated_snapshot_retention_period
if self.manual_snapshot_retention_period:
params["ManualSnapshotRetentionPeriod"] = self.manual_snapshot_retention_period
if self.port:
params["Port"] = self.port
if self.cluster_version:
params["ClusterVersion"] = self.cluster_version
if self.allow_version_upgrade:
params["AllowVersionUpgrade"] = self.allow_version_upgrade
if self.publicly_accessible:
params["PubliclyAccessible"] = self.publicly_accessible
if self.encrypted:
params["Encrypted"] = self.encrypted
if self.hsm_client_certificate_identifier:
params["HsmClientCertificateIdentifier"] = self.hsm_client_certificate_identifier
if self.hsm_configuration_identifier:
params["HsmConfigurationIdentifier"] = self.hsm_configuration_identifier
if self.elastic_ip:
params["ElasticIp"] = self.elastic_ip
if self.tags:
params["Tags"] = self.tags
if self.kms_key_id:
params["KmsKeyId"] = self.kms_key_id
if self.enhanced_vpc_routing:
params["EnhancedVpcRouting"] = self.enhanced_vpc_routing
if self.additional_info:
params["AdditionalInfo"] = self.additional_info
if self.iam_roles:
params["IamRoles"] = self.iam_roles
if self.maintenance_track_name:
params["MaintenanceTrackName"] = self.maintenance_track_name
if self.snapshot_schedule_identifier:
params["SnapshotScheduleIdentifier"] = self.snapshot_schedule_identifier
if self.availability_zone_relocation:
params["AvailabilityZoneRelocation"] = self.availability_zone_relocation
if self.aqua_configuration_status:
params["AquaConfigurationStatus"] = self.aqua_configuration_status
if self.default_iam_role_arn:
params["DefaultIamRoleArn"] = self.default_iam_role_arn
cluster = redshift_hook.create_cluster(
self.cluster_identifier,
self.node_type,
self.master_username,
self.master_user_password,
params,
)
if self.deferrable:
self.defer(
trigger=RedshiftCreateClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempt,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
if self.wait_for_completion:
redshift_hook.get_conn().get_waiter("cluster_available").wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempt,
},
)
self.log.info("Created Redshift cluster %s", self.cluster_identifier)
self.log.info(cluster)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error creating cluster: {event}")
return
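# Illustrative usage sketch (not part of the provider source): creating a small multi-node cluster
# and waiting until it is available. The identifier, credentials and node type are hypothetical
# placeholders; in a real DAG the password would come from a secrets backend rather than a literal.
def _example_create_redshift_cluster_task() -> RedshiftCreateClusterOperator:
    return RedshiftCreateClusterOperator(
        task_id="create_redshift_cluster",
        cluster_identifier="example-cluster",  # placeholder
        node_type="ra3.xlplus",
        master_username="awsuser",  # placeholder
        master_user_password="********",  # placeholder - use a secrets backend in practice
        cluster_type="multi-node",
        number_of_nodes=2,
        wait_for_completion=True,
        max_attempt=10,
        poll_interval=60,
    )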
class RedshiftCreateClusterSnapshotOperator(BaseOperator):
"""
Creates a manual snapshot of the specified cluster. The cluster must be in the available state.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftCreateClusterSnapshotOperator`
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
:param cluster_identifier: The cluster identifier for which you want a snapshot
:param retention_period: The number of days that a manual snapshot is retained.
If the value is -1, the manual snapshot is retained indefinitely.
    :param tags: A list of tag instances
    :param wait_for_completion: Whether to wait for the cluster snapshot to be in the ``available`` state
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check state
:param max_attempt: The maximum number of attempts to be made to check the state
:param aws_conn_id: The Airflow connection used for AWS credentials.
The default connection id is ``aws_default``
:param deferrable: If True, the operator will run as a deferrable operator.
"""
template_fields: Sequence[str] = (
"cluster_identifier",
"snapshot_identifier",
)
def __init__(
self,
*,
snapshot_identifier: str,
cluster_identifier: str,
retention_period: int = -1,
tags: list[Any] | None = None,
wait_for_completion: bool = False,
poll_interval: int = 15,
max_attempt: int = 20,
aws_conn_id: str = "aws_default",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.snapshot_identifier = snapshot_identifier
self.cluster_identifier = cluster_identifier
self.retention_period = retention_period
self.tags = tags
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
self.max_attempt = max_attempt
self.deferrable = deferrable
self.aws_conn_id = aws_conn_id
self.redshift_hook = RedshiftHook(aws_conn_id=aws_conn_id)
def execute(self, context: Context) -> Any:
cluster_state = self.redshift_hook.cluster_status(cluster_identifier=self.cluster_identifier)
if cluster_state != "available":
raise AirflowException(
"Redshift cluster must be in available state. "
f"Redshift cluster current state is {cluster_state}"
)
self.redshift_hook.create_cluster_snapshot(
cluster_identifier=self.cluster_identifier,
snapshot_identifier=self.snapshot_identifier,
retention_period=self.retention_period,
tags=self.tags,
)
if self.deferrable:
self.defer(
trigger=RedshiftCreateClusterSnapshotTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempt,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.max_attempt * self.poll_interval + 60),
)
if self.wait_for_completion:
self.redshift_hook.get_conn().get_waiter("snapshot_available").wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempt,
},
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error creating snapshot: {event}")
else:
self.log.info("Cluster snapshot created.")
return
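# Illustrative usage sketch (not part of the provider source): taking a manual snapshot of an
# available cluster. The identifiers below are hypothetical placeholders.
def _example_create_cluster_snapshot_task() -> RedshiftCreateClusterSnapshotOperator:
    return RedshiftCreateClusterSnapshotOperator(
        task_id="create_cluster_snapshot",
        cluster_identifier="example-cluster",  # placeholder, must be in the "available" state
        snapshot_identifier="example-cluster-snapshot",  # placeholder
        retention_period=7,  # keep the manual snapshot for one week
        wait_for_completion=True,
    )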
class RedshiftDeleteClusterSnapshotOperator(BaseOperator):
"""
Deletes the specified manual snapshot.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftDeleteClusterSnapshotOperator`
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
:param cluster_identifier: The unique identifier of the cluster the snapshot was created from
    :param wait_for_completion: Whether to wait for the snapshot deletion to complete or not.
        The default value is ``True``.
:param aws_conn_id: The Airflow connection used for AWS credentials.
The default connection id is ``aws_default``
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check snapshot state
"""
template_fields: Sequence[str] = (
"cluster_identifier",
"snapshot_identifier",
)
def __init__(
self,
*,
snapshot_identifier: str,
cluster_identifier: str,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
poll_interval: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.snapshot_identifier = snapshot_identifier
self.cluster_identifier = cluster_identifier
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
self.redshift_hook = RedshiftHook(aws_conn_id=aws_conn_id)
def execute(self, context: Context) -> Any:
self.redshift_hook.get_conn().delete_cluster_snapshot(
SnapshotClusterIdentifier=self.cluster_identifier,
SnapshotIdentifier=self.snapshot_identifier,
)
if self.wait_for_completion:
while self.get_status() is not None:
time.sleep(self.poll_interval)
def get_status(self) -> str:
return self.redshift_hook.get_cluster_snapshot_status(
snapshot_identifier=self.snapshot_identifier,
)
class RedshiftResumeClusterOperator(BaseOperator):
"""
Resume a paused AWS Redshift Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftResumeClusterOperator`
:param cluster_identifier: Unique identifier of the AWS Redshift cluster
:param aws_conn_id: The Airflow connection used for AWS credentials.
The default connection id is ``aws_default``
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check cluster state
:param max_attempts: The maximum number of attempts to check the state of the cluster.
:param wait_for_completion: If True, the operator will wait for the cluster to be in the
`resumed` state. Default is False.
:param deferrable: If True, the operator will run as a deferrable operator.
"""
template_fields: Sequence[str] = ("cluster_identifier",)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
cluster_identifier: str,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
max_attempts: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.aws_conn_id = aws_conn_id
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.max_attempts = max_attempts
self.poll_interval = poll_interval
# These parameters are used to address an issue with the boto3 API where the API
# prematurely reports the cluster as available to receive requests. This causes the cluster
# to reject initial attempts to resume the cluster despite reporting the correct state.
self._remaining_attempts = 10
self._attempt_interval = 15
def execute(self, context: Context):
redshift_hook = RedshiftHook(aws_conn_id=self.aws_conn_id)
self.log.info("Starting resume cluster")
while self._remaining_attempts >= 1:
try:
redshift_hook.get_conn().resume_cluster(ClusterIdentifier=self.cluster_identifier)
break
except redshift_hook.get_conn().exceptions.InvalidClusterStateFault as error:
self._remaining_attempts = self._remaining_attempts - 1
if self._remaining_attempts > 0:
self.log.error(
"Unable to resume cluster. %d attempts remaining.", self._remaining_attempts
)
time.sleep(self._attempt_interval)
else:
raise error
if self.deferrable:
self.defer(
trigger=RedshiftResumeClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.max_attempts * self.poll_interval + 60),
)
if self.wait_for_completion:
waiter = redshift_hook.get_waiter("cluster_resumed")
waiter.wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempts,
},
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error resuming cluster: {event}")
else:
self.log.info("Resumed cluster successfully")
return
class RedshiftPauseClusterOperator(BaseOperator):
"""
Pause an AWS Redshift Cluster if it has status `available`.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftPauseClusterOperator`
:param cluster_identifier: id of the AWS Redshift Cluster
:param aws_conn_id: aws connection to use
:param deferrable: Run operator in the deferrable mode
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check cluster state
:param max_attempts: Maximum number of attempts to poll the cluster
"""
template_fields: Sequence[str] = ("cluster_identifier",)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
cluster_identifier: str,
aws_conn_id: str = "aws_default",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
max_attempts: int = 15,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.aws_conn_id = aws_conn_id
self.deferrable = deferrable
self.max_attempts = max_attempts
self.poll_interval = poll_interval
# These parameters are used to address an issue with the boto3 API where the API
# prematurely reports the cluster as available to receive requests. This causes the cluster
# to reject initial attempts to pause the cluster despite reporting the correct state.
self._remaining_attempts = 10
self._attempt_interval = 15
def execute(self, context: Context):
redshift_hook = RedshiftHook(aws_conn_id=self.aws_conn_id)
while self._remaining_attempts >= 1:
try:
redshift_hook.get_conn().pause_cluster(ClusterIdentifier=self.cluster_identifier)
break
except redshift_hook.get_conn().exceptions.InvalidClusterStateFault as error:
self._remaining_attempts = self._remaining_attempts - 1
if self._remaining_attempts > 0:
self.log.error(
"Unable to pause cluster. %d attempts remaining.", self._remaining_attempts
)
time.sleep(self._attempt_interval)
else:
raise error
if self.deferrable:
self.defer(
trigger=RedshiftPauseClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.max_attempts * self.poll_interval + 60),
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error pausing cluster: {event}")
else:
self.log.info("Paused cluster successfully")
return
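# Illustrative usage sketch (not part of the provider source): pausing a cluster outside business
# hours and later resuming it. The cluster identifier is a hypothetical placeholder; in practice
# the two tasks would usually live in separate DAG runs or be ordered with other work in between.
def _example_pause_and_resume_tasks() -> tuple[RedshiftPauseClusterOperator, RedshiftResumeClusterOperator]:
    pause = RedshiftPauseClusterOperator(
        task_id="pause_redshift_cluster",
        cluster_identifier="example-cluster",  # placeholder
    )
    resume = RedshiftResumeClusterOperator(
        task_id="resume_redshift_cluster",
        cluster_identifier="example-cluster",  # placeholder
        wait_for_completion=True,
    )
    return pause, resume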
class RedshiftDeleteClusterOperator(BaseOperator):
"""
Delete an AWS Redshift cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftDeleteClusterOperator`
:param cluster_identifier: unique identifier of a cluster
    :param skip_final_cluster_snapshot: whether to skip creating a final cluster snapshot before deletion
    :param final_cluster_snapshot_identifier: name of the final cluster snapshot to create
        (required when ``skip_final_cluster_snapshot`` is ``False``)
    :param wait_for_completion: Whether to wait for cluster deletion or not.
        The default value is ``True``.
:param aws_conn_id: aws connection to use
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check cluster state
:param deferrable: Run operator in the deferrable mode.
:param max_attempts: (Deferrable mode only) The maximum number of attempts to be made
"""
template_fields: Sequence[str] = ("cluster_identifier",)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
cluster_identifier: str,
skip_final_cluster_snapshot: bool = True,
final_cluster_snapshot_identifier: str | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
poll_interval: int = 30,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
max_attempts: int = 30,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.skip_final_cluster_snapshot = skip_final_cluster_snapshot
self.final_cluster_snapshot_identifier = final_cluster_snapshot_identifier
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
# These parameters are added to keep trying if there is a running operation in the cluster
        # If there is a running operation on the cluster while trying to delete it, an
        # InvalidClusterStateFault is thrown. In such a case, we retry after a short delay.
self._attempts = 10
self._attempt_interval = 15
self.redshift_hook = RedshiftHook(aws_conn_id=aws_conn_id)
self.aws_conn_id = aws_conn_id
self.deferrable = deferrable
self.max_attempts = max_attempts
def execute(self, context: Context):
while self._attempts >= 1:
try:
self.redshift_hook.delete_cluster(
cluster_identifier=self.cluster_identifier,
skip_final_cluster_snapshot=self.skip_final_cluster_snapshot,
final_cluster_snapshot_identifier=self.final_cluster_snapshot_identifier,
)
break
except self.redshift_hook.get_conn().exceptions.InvalidClusterStateFault:
self._attempts = self._attempts - 1
if self._attempts > 0:
self.log.error("Unable to delete cluster. %d attempts remaining.", self._attempts)
time.sleep(self._attempt_interval)
else:
raise
if self.deferrable:
self.defer(
timeout=timedelta(seconds=self.max_attempts * self.poll_interval + 60),
trigger=RedshiftDeleteClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
waiter = self.redshift_hook.get_conn().get_waiter("cluster_deleted")
waiter.wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={"Delay": self.poll_interval, "MaxAttempts": self.max_attempts},
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error deleting cluster: {event}")
else:
self.log.info("Cluster deleted successfully")
| 32,304 | 44.436006 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/athena.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.athena import AthenaHook
from airflow.providers.amazon.aws.triggers.athena import AthenaTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class AthenaOperator(BaseOperator):
"""
An operator that submits a presto query to athena.
    .. note:: if the task is killed while it runs, it will cancel the Athena query that was launched,
        EXCEPT if running in deferrable mode.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AthenaOperator`
    :param query: Presto query to be run on Athena. (templated)
:param database: Database to select. (templated)
:param output_location: s3 path to write the query results into. (templated)
:param aws_conn_id: aws connection to use
:param client_request_token: Unique token created by user to avoid multiple executions of same query
:param workgroup: Athena workgroup in which query will be run. (templated)
:param query_execution_context: Context in which query need to be run
:param result_configuration: Dict with path to store results in and config related to encryption
:param sleep_time: Time (in seconds) to wait between two consecutive calls to check query status on Athena
    :param max_polling_attempts: Number of times to poll for query state before the function exits.
To limit task execution time, use execution_timeout.
:param log_query: Whether to log athena query and other execution params when it's executed.
Defaults to *True*.
"""
ui_color = "#44b5e2"
template_fields: Sequence[str] = ("query", "database", "output_location", "workgroup")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"query": "sql"}
def __init__(
self,
*,
query: str,
database: str,
output_location: str,
aws_conn_id: str = "aws_default",
client_request_token: str | None = None,
workgroup: str = "primary",
query_execution_context: dict[str, str] | None = None,
result_configuration: dict[str, Any] | None = None,
sleep_time: int = 30,
max_polling_attempts: int | None = None,
log_query: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.query = query
self.database = database
self.output_location = output_location
self.aws_conn_id = aws_conn_id
self.client_request_token = client_request_token
self.workgroup = workgroup
self.query_execution_context = query_execution_context or {}
self.result_configuration = result_configuration or {}
self.sleep_time = sleep_time
self.max_polling_attempts = max_polling_attempts or 999999
self.query_execution_id: str | None = None
self.log_query: bool = log_query
self.deferrable = deferrable
@cached_property
def hook(self) -> AthenaHook:
"""Create and return an AthenaHook."""
return AthenaHook(self.aws_conn_id, log_query=self.log_query)
def execute(self, context: Context) -> str | None:
"""Run Presto Query on Athena."""
self.query_execution_context["Database"] = self.database
self.result_configuration["OutputLocation"] = self.output_location
self.query_execution_id = self.hook.run_query(
self.query,
self.query_execution_context,
self.result_configuration,
self.client_request_token,
self.workgroup,
)
if self.deferrable:
self.defer(
trigger=AthenaTrigger(
self.query_execution_id, self.sleep_time, self.max_polling_attempts, self.aws_conn_id
),
method_name="execute_complete",
)
# implicit else:
query_status = self.hook.poll_query_status(
self.query_execution_id,
max_polling_attempts=self.max_polling_attempts,
sleep_time=self.sleep_time,
)
if query_status in AthenaHook.FAILURE_STATES:
error_message = self.hook.get_state_change_reason(self.query_execution_id)
raise Exception(
f"Final state of Athena job is {query_status}, query_execution_id is "
f"{self.query_execution_id}. Error: {error_message}"
)
elif not query_status or query_status in AthenaHook.INTERMEDIATE_STATES:
raise Exception(
f"Final state of Athena job is {query_status}. Max tries of poll status exceeded, "
f"query_execution_id is {self.query_execution_id}."
)
return self.query_execution_id
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while waiting for operation on cluster to complete: {event}")
return event["value"]
def on_kill(self) -> None:
"""Cancel the submitted athena query."""
if self.query_execution_id:
self.log.info("Received a kill signal.")
response = self.hook.stop_query(self.query_execution_id)
http_status_code = None
try:
http_status_code = response["ResponseMetadata"]["HTTPStatusCode"]
except Exception:
self.log.exception(
"Exception while cancelling query. Query execution id: %s", self.query_execution_id
)
finally:
if http_status_code is None or http_status_code != 200:
self.log.error("Unable to request query cancel on athena. Exiting")
else:
self.log.info(
"Polling Athena for query with id %s to reach final state", self.query_execution_id
)
self.hook.poll_query_status(self.query_execution_id, sleep_time=self.sleep_time)
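# Illustrative usage sketch (not part of the provider source): running a simple aggregation query
# and writing the results to S3. The database, table and bucket below are hypothetical placeholders;
# the query string itself is templated, so Jinja expressions such as "{{ ds }}" may be used in it.
def _example_athena_task() -> AthenaOperator:
    return AthenaOperator(
        task_id="run_athena_query",
        query="SELECT category, COUNT(*) AS cnt FROM example_table GROUP BY category",  # placeholder SQL
        database="example_database",  # placeholder Glue/Athena database
        output_location="s3://example-bucket/athena-results/",  # placeholder result location
        workgroup="primary",
        sleep_time=30,
    )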
| 7,164 | 42.162651 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AWS Batch services.
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from __future__ import annotations
import warnings
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.providers.amazon.aws.links.batch import (
BatchJobDefinitionLink,
BatchJobDetailsLink,
BatchJobQueueLink,
)
from airflow.providers.amazon.aws.links.logs import CloudWatchEventsLink
from airflow.providers.amazon.aws.triggers.batch import (
BatchCreateComputeEnvironmentTrigger,
BatchJobTrigger,
)
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher
if TYPE_CHECKING:
from airflow.utils.context import Context
class BatchOperator(BaseOperator):
"""Execute a job on AWS Batch.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BatchOperator`
:param job_name: the name for the job that will run on AWS Batch (templated)
:param job_definition: the job definition name on AWS Batch
:param job_queue: the queue name on AWS Batch
:param overrides: DEPRECATED, use container_overrides instead with the same value.
:param container_overrides: the `containerOverrides` parameter for boto3 (templated)
:param node_overrides: the `nodeOverrides` parameter for boto3 (templated)
:param share_identifier: The share identifier for the job. Don't specify this parameter if the job queue
doesn't have a scheduling policy.
:param scheduling_priority_override: The scheduling priority for the job.
Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority.
This overrides any scheduling priority in the job definition
:param array_properties: the `arrayProperties` parameter for boto3
:param parameters: the `parameters` for boto3 (templated)
:param job_id: the job ID, usually unknown (None) until the
submit_job operation gets the jobId defined by AWS Batch
:param waiters: an :py:class:`.BatchWaiters` object (see note below);
if None, polling is used with max_retries and status_retries.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
    :param aws_conn_id: connection id of AWS credentials / region name. If None,
        the default boto3 credential strategy will be used.
:param region_name: region name to use in AWS Hook.
Override the region_name in connection (if provided)
:param tags: collection of tags to apply to the AWS Batch job submission
if None, no tags are submitted
:param deferrable: Run operator in the deferrable mode.
    :param awslogs_enabled: Specifies whether logs from CloudWatch
        should be printed or not. Defaults to ``False``.
        If it is an array job, only the logs of the first task will be printed.
    :param awslogs_fetch_interval: The interval at which CloudWatch logs are fetched. Defaults to 30 seconds.
:param poll_interval: (Deferrable mode only) Time in seconds to wait between polling.
.. note::
Any custom waiters must return a waiter for these calls:
.. code-block:: python
waiter = waiters.get_waiter("JobExists")
waiter = waiters.get_waiter("JobRunning")
waiter = waiters.get_waiter("JobComplete")
"""
ui_color = "#c3dae0"
arn: str | None = None
template_fields: Sequence[str] = (
"job_id",
"job_name",
"job_definition",
"job_queue",
"container_overrides",
"array_properties",
"node_overrides",
"parameters",
"waiters",
"tags",
"wait_for_completion",
"awslogs_enabled",
"awslogs_fetch_interval",
)
template_fields_renderers = {
"container_overrides": "json",
"parameters": "json",
"node_overrides": "json",
}
@property
def operator_extra_links(self):
op_extra_links = [BatchJobDetailsLink()]
if self.wait_for_completion:
op_extra_links.extend([BatchJobDefinitionLink(), BatchJobQueueLink()])
if not self.array_properties:
# There is no CloudWatch Link to the parent Batch Job available.
op_extra_links.append(CloudWatchEventsLink())
return tuple(op_extra_links)
def __init__(
self,
*,
job_name: str,
job_definition: str,
job_queue: str,
overrides: dict | None = None, # deprecated
container_overrides: dict | None = None,
array_properties: dict | None = None,
node_overrides: dict | None = None,
share_identifier: str | None = None,
scheduling_priority_override: int | None = None,
parameters: dict | None = None,
job_id: str | None = None,
waiters: Any | None = None,
max_retries: int | None = None,
status_retries: int | None = None,
aws_conn_id: str | None = None,
region_name: str | None = None,
tags: dict | None = None,
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 30,
awslogs_enabled: bool = False,
awslogs_fetch_interval: timedelta = timedelta(seconds=30),
**kwargs,
) -> None:
BaseOperator.__init__(self, **kwargs)
self.job_id = job_id
self.job_name = job_name
self.job_definition = job_definition
self.job_queue = job_queue
self.container_overrides = container_overrides
# handle `overrides` deprecation in favor of `container_overrides`
if overrides:
if container_overrides:
# disallow setting both old and new params
raise AirflowException(
"'container_overrides' replaces the 'overrides' parameter. "
"You cannot specify both. Please remove assignation to the deprecated 'overrides'."
)
self.container_overrides = overrides
warnings.warn(
"Parameter `overrides` is deprecated, Please use `container_overrides` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.node_overrides = node_overrides
self.share_identifier = share_identifier
self.scheduling_priority_override = scheduling_priority_override
self.array_properties = array_properties
self.parameters = parameters or {}
self.waiters = waiters
self.tags = tags or {}
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.poll_interval = poll_interval
self.awslogs_enabled = awslogs_enabled
self.awslogs_fetch_interval = awslogs_fetch_interval
# params for hook
self.max_retries = max_retries
self.status_retries = status_retries
self.aws_conn_id = aws_conn_id
self.region_name = region_name
@cached_property
def hook(self) -> BatchClientHook:
return BatchClientHook(
max_retries=self.max_retries,
status_retries=self.status_retries,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
def execute(self, context: Context):
"""Submit and monitor an AWS Batch job.
:raises: AirflowException
"""
self.submit_job(context)
if self.deferrable:
self.defer(
timeout=self.execution_timeout,
trigger=BatchJobTrigger(
job_id=self.job_id,
waiter_max_attempts=self.max_retries or 10,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_delay=self.poll_interval,
),
method_name="execute_complete",
)
if self.wait_for_completion:
self.monitor_job(context)
return self.job_id
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
else:
self.log.info("Job completed.")
return event["job_id"]
def on_kill(self):
response = self.hook.client.terminate_job(jobId=self.job_id, reason="Task killed by the user")
self.log.info("AWS Batch job (%s) terminated: %s", self.job_id, response)
def submit_job(self, context: Context):
"""Submit an AWS Batch job.
:raises: AirflowException
"""
self.log.info(
"Running AWS Batch job - job definition: %s - on queue %s",
self.job_definition,
self.job_queue,
)
if self.container_overrides:
self.log.info("AWS Batch job - container overrides: %s", self.container_overrides)
if self.array_properties:
self.log.info("AWS Batch job - array properties: %s", self.array_properties)
if self.node_overrides:
self.log.info("AWS Batch job - node properties: %s", self.node_overrides)
args = {
"jobName": self.job_name,
"jobQueue": self.job_queue,
"jobDefinition": self.job_definition,
"arrayProperties": self.array_properties,
"parameters": self.parameters,
"tags": self.tags,
"containerOverrides": self.container_overrides,
"nodeOverrides": self.node_overrides,
"shareIdentifier": self.share_identifier,
"schedulingPriorityOverride": self.scheduling_priority_override,
}
try:
response = self.hook.client.submit_job(**trim_none_values(args))
except Exception as e:
self.log.error(
"AWS Batch job failed submission - job definition: %s - on queue %s",
self.job_definition,
self.job_queue,
)
raise AirflowException(e)
self.job_id = response["jobId"]
self.log.info("AWS Batch job (%s) started: %s", self.job_id, response)
BatchJobDetailsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_id=self.job_id,
)
def monitor_job(self, context: Context):
"""Monitor an AWS Batch job.
This can raise an exception or an AirflowTaskTimeout if the task was
created with ``execution_timeout``.
"""
if not self.job_id:
raise AirflowException("AWS Batch job - job_id was not found")
try:
job_desc = self.hook.get_job_description(self.job_id)
job_definition_arn = job_desc["jobDefinition"]
job_queue_arn = job_desc["jobQueue"]
self.log.info(
"AWS Batch job (%s) Job Definition ARN: %r, Job Queue ARN: %r",
self.job_id,
job_definition_arn,
job_queue_arn,
)
except KeyError:
self.log.warning("AWS Batch job (%s) can't get Job Definition ARN and Job Queue ARN", self.job_id)
else:
BatchJobDefinitionLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_definition_arn=job_definition_arn,
)
BatchJobQueueLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_queue_arn=job_queue_arn,
)
if self.awslogs_enabled:
if self.waiters:
self.waiters.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
else:
self.hook.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
else:
if self.waiters:
self.waiters.wait_for_job(self.job_id)
else:
self.hook.wait_for_job(self.job_id)
awslogs = self.hook.get_job_all_awslogs_info(self.job_id)
if awslogs:
self.log.info("AWS Batch job (%s) CloudWatch Events details found. Links to logs:", self.job_id)
link_builder = CloudWatchEventsLink()
for log in awslogs:
self.log.info(link_builder.format_link(**log))
if len(awslogs) > 1:
# there can be several log streams on multi-node jobs
self.log.warning(
"out of all those logs, we can only link to one in the UI. Using the first one."
)
CloudWatchEventsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
**awslogs[0],
)
self.hook.check_job_success(self.job_id)
self.log.info("AWS Batch job (%s) succeeded", self.job_id)
def _get_batch_log_fetcher(self, job_id: str) -> AwsTaskLogFetcher | None:
awslog_info = self.hook.get_job_awslogs_info(job_id)
if not awslog_info:
return None
return AwsTaskLogFetcher(
aws_conn_id=self.aws_conn_id,
region_name=awslog_info["awslogs_region"],
log_group=awslog_info["awslogs_group"],
log_stream_name=awslog_info["awslogs_stream_name"],
fetch_interval=self.awslogs_fetch_interval,
logger=self.log,
)
class BatchCreateComputeEnvironmentOperator(BaseOperator):
"""Create an AWS Batch compute environment.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BatchCreateComputeEnvironmentOperator`
:param compute_environment_name: Name of the AWS batch compute
environment (templated).
:param environment_type: Type of the compute-environment.
:param state: State of the compute-environment.
:param compute_resources: Details about the resources managed by the
compute-environment (templated). More details:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.create_compute_environment
:param unmanaged_v_cpus: Maximum number of vCPU for an unmanaged compute
environment. This parameter is only supported when the ``type``
parameter is set to ``UNMANAGED``.
:param service_role: IAM role that allows Batch to make calls to other AWS
services on your behalf (templated).
:param tags: Tags that you apply to the compute-environment to help you
categorize and organize your resources.
:param poll_interval: How long to wait in seconds between 2 polls at the environment status.
Only useful when deferrable is True.
:param max_retries: How many times to poll for the environment status.
Only useful when deferrable is True.
:param aws_conn_id: Connection ID of AWS credentials / region name. If None,
credential boto3 strategy will be used.
:param region_name: Region name to use in AWS Hook. Overrides the
``region_name`` in connection if provided.
:param deferrable: If True, the operator will wait asynchronously for the environment to be created.
This mode requires aiobotocore module to be installed. (default: False)
"""
template_fields: Sequence[str] = (
"compute_environment_name",
"compute_resources",
"service_role",
)
template_fields_renderers = {"compute_resources": "json"}
def __init__(
self,
compute_environment_name: str,
environment_type: str,
state: str,
compute_resources: dict,
unmanaged_v_cpus: int | None = None,
service_role: str | None = None,
tags: dict | None = None,
poll_interval: int = 30,
max_retries: int | None = None,
aws_conn_id: str | None = None,
region_name: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
if "status_retries" in kwargs:
warnings.warn(
"The `status_retries` parameter is unused and should be removed. "
"It'll be deleted in a future version.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs.pop("status_retries") # remove before calling super() to prevent unexpected arg error
super().__init__(**kwargs)
self.compute_environment_name = compute_environment_name
self.environment_type = environment_type
self.state = state
self.unmanaged_v_cpus = unmanaged_v_cpus
self.compute_resources = compute_resources
self.service_role = service_role
self.tags = tags or {}
self.poll_interval = poll_interval
self.max_retries = max_retries or 120
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.deferrable = deferrable
@cached_property
def hook(self):
"""Create and return a BatchClientHook."""
return BatchClientHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
def execute(self, context: Context):
"""Create an AWS batch compute environment."""
kwargs: dict[str, Any] = {
"computeEnvironmentName": self.compute_environment_name,
"type": self.environment_type,
"state": self.state,
"unmanagedvCpus": self.unmanaged_v_cpus,
"computeResources": self.compute_resources,
"serviceRole": self.service_role,
"tags": self.tags,
}
response = self.hook.client.create_compute_environment(**trim_none_values(kwargs))
arn = response["computeEnvironmentArn"]
if self.deferrable:
self.defer(
trigger=BatchCreateComputeEnvironmentTrigger(
arn, self.poll_interval, self.max_retries, self.aws_conn_id, self.region_name
),
method_name="execute_complete",
)
self.log.info("AWS Batch compute environment created successfully")
return arn
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error while waiting for the compute environment to be ready: {event}")
return event["value"]
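# Illustrative usage sketch: a minimal DAG wiring for BatchCreateComputeEnvironmentOperator as
# defined above. The DAG id, environment name, subnet and security-group ids are hypothetical
# placeholders, not values required or provided by the operator.
def _example_create_compute_environment_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_batch_compute_env", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        BatchCreateComputeEnvironmentOperator(
            task_id="create_compute_env",
            compute_environment_name="my-fargate-env",
            environment_type="MANAGED",
            state="ENABLED",
            compute_resources={
                # minimal Fargate resources; subnet and security group ids are placeholders
                "type": "FARGATE",
                "maxvCpus": 4,
                "subnets": ["subnet-012345"],
                "securityGroupIds": ["sg-012345"],
            },
        )
    return dag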
| 20,436 | 39.469307 | 133 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/datasync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create, get, update, execute and delete an AWS DataSync Task."""
from __future__ import annotations
import logging
import random
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.datasync import DataSyncHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataSyncOperator(BaseOperator):
"""Find, Create, Update, Execute and Delete AWS DataSync Tasks.
If ``do_xcom_push`` is True, then the DataSync TaskArn and TaskExecutionArn
which were executed will be pushed to an XCom.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataSyncOperator`
.. note:: There may be 0, 1, or many existing DataSync Tasks defined in your AWS
environment. The default behavior is to create a new Task if there are 0, or
execute the Task if there was 1 Task, or fail if there were many Tasks.
:param aws_conn_id: AWS connection to use.
:param wait_interval_seconds: Time to wait between two
consecutive calls to check TaskExecution status.
:param max_iterations: Maximum number of
consecutive calls to check TaskExecution status.
:param wait_for_completion: If True, wait for the task execution to reach a final state
:param task_arn: AWS DataSync TaskArn to use. If None, then this operator will
attempt to either search for an existing Task or attempt to create a new Task.
:param source_location_uri: Source location URI to search for. All DataSync
Tasks with a LocationArn with this URI will be considered.
Example: ``smb://server/subdir``
:param destination_location_uri: Destination location URI to search for.
All DataSync Tasks with a LocationArn with this URI will be considered.
Example: ``s3://airflow_bucket/stuff``
:param allow_random_task_choice: If multiple Tasks match, one must be chosen to
execute. If allow_random_task_choice is True then a random one is chosen.
:param allow_random_location_choice: If multiple Locations match, one must be chosen
when creating a task. If allow_random_location_choice is True then a random one is chosen.
:param create_task_kwargs: If no suitable TaskArn is identified,
it will be created if ``create_task_kwargs`` is defined.
``create_task_kwargs`` is then used internally like this:
``boto3.create_task(**create_task_kwargs)``
Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ..., 'Tags': ...}``
:param create_source_location_kwargs: If no suitable LocationArn is found,
a Location will be created if ``create_source_location_kwargs`` is defined.
``create_source_location_kwargs`` is then used internally like this:
``boto3.create_location_xyz(**create_source_location_kwargs)``
The xyz is determined from the prefix of source_location_uri, eg ``smb:/...`` or ``s3:/...``
Example: ``{'Subdirectory': ..., 'ServerHostname': ..., ...}``
:param create_destination_location_kwargs: If no suitable LocationArn is found,
a Location will be created if ``create_destination_location_kwargs`` is defined.
``create_destination_location_kwargs`` is used internally like this:
``boto3.create_location_xyz(**create_destination_location_kwargs)``
        The xyz is determined from the prefix of destination_location_uri, eg ``smb:/...`` or ``s3:/...``
Example: ``{'S3BucketArn': ..., 'S3Config': {'BucketAccessRoleArn': ...}, ...}``
:param update_task_kwargs: If a suitable TaskArn is found or created,
it will be updated if ``update_task_kwargs`` is defined.
``update_task_kwargs`` is used internally like this:
``boto3.update_task(TaskArn=task_arn, **update_task_kwargs)``
Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ...}``
:param task_execution_kwargs: Additional kwargs passed directly when starting the
Task execution, used internally like this:
``boto3.start_task_execution(TaskArn=task_arn, **task_execution_kwargs)``
:param delete_task_after_execution: If True then the TaskArn which was executed
will be deleted from AWS DataSync on successful completion.
:raises AirflowException: If ``task_arn`` was not specified, or if
either ``source_location_uri`` or ``destination_location_uri`` were
not specified.
:raises AirflowException: If source or destination Location were not found
and could not be created.
:raises AirflowException: If ``choose_task`` or ``choose_location`` fails.
    :raises AirflowException: If Task creation, update, execution or deletion fails.
"""
template_fields: Sequence[str] = (
"task_arn",
"source_location_uri",
"destination_location_uri",
"create_task_kwargs",
"create_source_location_kwargs",
"create_destination_location_kwargs",
"update_task_kwargs",
"task_execution_kwargs",
)
template_fields_renderers = {
"create_task_kwargs": "json",
"create_source_location_kwargs": "json",
"create_destination_location_kwargs": "json",
"update_task_kwargs": "json",
"task_execution_kwargs": "json",
}
ui_color = "#44b5e2"
def __init__(
self,
*,
aws_conn_id: str = "aws_default",
wait_interval_seconds: int = 30,
max_iterations: int = 60,
wait_for_completion: bool = True,
task_arn: str | None = None,
source_location_uri: str | None = None,
destination_location_uri: str | None = None,
allow_random_task_choice: bool = False,
allow_random_location_choice: bool = False,
create_task_kwargs: dict | None = None,
create_source_location_kwargs: dict | None = None,
create_destination_location_kwargs: dict | None = None,
update_task_kwargs: dict | None = None,
task_execution_kwargs: dict | None = None,
delete_task_after_execution: bool = False,
**kwargs,
):
super().__init__(**kwargs)
# Assignments
self.aws_conn_id = aws_conn_id
self.wait_interval_seconds = wait_interval_seconds
self.max_iterations = max_iterations
self.wait_for_completion = wait_for_completion
self.task_arn = task_arn
self.source_location_uri = source_location_uri
self.destination_location_uri = destination_location_uri
self.allow_random_task_choice = allow_random_task_choice
self.allow_random_location_choice = allow_random_location_choice
        self.create_task_kwargs = create_task_kwargs or {}
        self.create_source_location_kwargs = create_source_location_kwargs or {}
        self.create_destination_location_kwargs = create_destination_location_kwargs or {}
        self.update_task_kwargs = update_task_kwargs or {}
        self.task_execution_kwargs = task_execution_kwargs or {}
self.delete_task_after_execution = delete_task_after_execution
# Validations
valid = False
if self.task_arn:
valid = True
if self.source_location_uri and self.destination_location_uri:
valid = True
if not valid:
raise AirflowException(
f"Either specify task_arn or both source_location_uri and destination_location_uri. "
f"task_arn={task_arn!r}, source_location_uri={source_location_uri!r}, "
f"destination_location_uri={destination_location_uri!r}"
)
# Others
self.hook: DataSyncHook | None = None
# Candidates - these are found in AWS as possible things
# for us to use
self.candidate_source_location_arns: list[str] | None = None
self.candidate_destination_location_arns: list[str] | None = None
self.candidate_task_arns: list[str] | None = None
# Actuals
self.source_location_arn: str | None = None
self.destination_location_arn: str | None = None
self.task_execution_arn: str | None = None
def get_hook(self) -> DataSyncHook:
"""Create and return DataSyncHook.
        :return DataSyncHook: A DataSyncHook instance.
"""
if self.hook:
return self.hook
self.hook = DataSyncHook(
aws_conn_id=self.aws_conn_id,
wait_interval_seconds=self.wait_interval_seconds,
)
return self.hook
def execute(self, context: Context):
# If task_arn was not specified then try to
# find 0, 1 or many candidate DataSync Tasks to run
if not self.task_arn:
self._get_tasks_and_locations()
# If some were found, identify which one to run
if self.candidate_task_arns:
self.task_arn = self.choose_task(self.candidate_task_arns)
# If we could not find one then try to create one
if not self.task_arn and self.create_task_kwargs:
self._create_datasync_task()
if not self.task_arn:
raise AirflowException("DataSync TaskArn could not be identified or created.")
self.log.info("Using DataSync TaskArn %s", self.task_arn)
# Update the DataSync Task
if self.update_task_kwargs:
self._update_datasync_task()
# Execute the DataSync Task
self._execute_datasync_task()
if not self.task_execution_arn:
raise AirflowException("Nothing was executed")
# Delete the DataSyncTask
if self.delete_task_after_execution:
self._delete_datasync_task()
return {"TaskArn": self.task_arn, "TaskExecutionArn": self.task_execution_arn}
def _get_tasks_and_locations(self) -> None:
"""Find existing DataSync Task based on source and dest Locations."""
hook = self.get_hook()
self.candidate_source_location_arns = self._get_location_arns(self.source_location_uri)
self.candidate_destination_location_arns = self._get_location_arns(self.destination_location_uri)
if not self.candidate_source_location_arns:
self.log.info("No matching source Locations")
return
if not self.candidate_destination_location_arns:
self.log.info("No matching destination Locations")
return
self.log.info("Finding DataSync TaskArns that have these LocationArns")
self.candidate_task_arns = hook.get_task_arns_for_location_arns(
self.candidate_source_location_arns,
self.candidate_destination_location_arns,
)
self.log.info("Found candidate DataSync TaskArns %s", self.candidate_task_arns)
def choose_task(self, task_arn_list: list) -> str | None:
"""Select 1 DataSync TaskArn from a list."""
if not task_arn_list:
return None
if len(task_arn_list) == 1:
return task_arn_list[0]
if self.allow_random_task_choice:
# Items are unordered so we don't want to just take
# the [0] one as it implies ordered items were received
# from AWS and might lead to confusion. Rather explicitly
# choose a random one
return random.choice(task_arn_list)
raise AirflowException(f"Unable to choose a Task from {task_arn_list}")
def choose_location(self, location_arn_list: list[str] | None) -> str | None:
"""Select 1 DataSync LocationArn from a list."""
if not location_arn_list:
return None
if len(location_arn_list) == 1:
return location_arn_list[0]
if self.allow_random_location_choice:
# Items are unordered so we don't want to just take
# the [0] one as it implies ordered items were received
# from AWS and might lead to confusion. Rather explicitly
# choose a random one
return random.choice(location_arn_list)
raise AirflowException(f"Unable to choose a Location from {location_arn_list}")
    def _create_datasync_task(self) -> None:
        """Create an AWS DataSync Task."""
hook = self.get_hook()
self.source_location_arn = self.choose_location(self.candidate_source_location_arns)
if not self.source_location_arn and self.source_location_uri and self.create_source_location_kwargs:
self.log.info("Attempting to create source Location")
self.source_location_arn = hook.create_location(
self.source_location_uri, **self.create_source_location_kwargs
)
if not self.source_location_arn:
raise AirflowException(
"Unable to determine source LocationArn. Does a suitable DataSync Location exist?"
)
self.destination_location_arn = self.choose_location(self.candidate_destination_location_arns)
if (
not self.destination_location_arn
and self.destination_location_uri
and self.create_destination_location_kwargs
):
self.log.info("Attempting to create destination Location")
self.destination_location_arn = hook.create_location(
self.destination_location_uri, **self.create_destination_location_kwargs
)
if not self.destination_location_arn:
raise AirflowException(
"Unable to determine destination LocationArn. Does a suitable DataSync Location exist?"
)
self.log.info("Creating a Task.")
self.task_arn = hook.create_task(
self.source_location_arn, self.destination_location_arn, **self.create_task_kwargs
)
if not self.task_arn:
raise AirflowException("Task could not be created")
self.log.info("Created a Task with TaskArn %s", self.task_arn)
    def _update_datasync_task(self) -> None:
        """Update an AWS DataSync Task."""
if not self.task_arn:
return
hook = self.get_hook()
self.log.info("Updating TaskArn %s", self.task_arn)
hook.update_task(self.task_arn, **self.update_task_kwargs)
self.log.info("Updated TaskArn %s", self.task_arn)
def _execute_datasync_task(self) -> None:
"""Create and monitor an AWS DataSync TaskExecution for a Task."""
if not self.task_arn:
raise AirflowException("Missing TaskArn")
hook = self.get_hook()
# Create a task execution:
self.log.info("Starting execution for TaskArn %s", self.task_arn)
self.task_execution_arn = hook.start_task_execution(self.task_arn, **self.task_execution_kwargs)
self.log.info("Started TaskExecutionArn %s", self.task_execution_arn)
if not self.wait_for_completion:
return
# Wait for task execution to complete
self.log.info("Waiting for TaskExecutionArn %s", self.task_execution_arn)
try:
result = hook.wait_for_task_execution(self.task_execution_arn, max_iterations=self.max_iterations)
except (AirflowTaskTimeout, AirflowException) as e:
self.log.error("Cancelling TaskExecution after Exception: %s", e)
self._cancel_datasync_task_execution()
raise
self.log.info("Completed TaskExecutionArn %s", self.task_execution_arn)
task_execution_description = hook.describe_task_execution(task_execution_arn=self.task_execution_arn)
self.log.info("task_execution_description=%s", task_execution_description)
# Log some meaningful statuses
level = logging.ERROR if not result else logging.INFO
self.log.log(level, "Status=%s", task_execution_description["Status"])
if "Result" in task_execution_description:
for k, v in task_execution_description["Result"].items():
if "Status" in k or "Error" in k:
self.log.log(level, "%s=%s", k, v)
if not result:
raise AirflowException(f"Failed TaskExecutionArn {self.task_execution_arn}")
def _cancel_datasync_task_execution(self):
"""Cancel the submitted DataSync task."""
hook = self.get_hook()
if self.task_execution_arn:
self.log.info("Cancelling TaskExecutionArn %s", self.task_execution_arn)
hook.cancel_task_execution(task_execution_arn=self.task_execution_arn)
self.log.info("Cancelled TaskExecutionArn %s", self.task_execution_arn)
def on_kill(self):
self.log.error("Cancelling TaskExecution after task was killed")
self._cancel_datasync_task_execution()
    def _delete_datasync_task(self) -> None:
        """Delete an AWS DataSync Task."""
if not self.task_arn:
return
hook = self.get_hook()
# Delete task:
self.log.info("Deleting Task with TaskArn %s", self.task_arn)
hook.delete_task(self.task_arn)
self.log.info("Task Deleted")
def _get_location_arns(self, location_uri) -> list[str]:
location_arns = self.get_hook().get_location_arns(location_uri)
self.log.info("Found LocationArns %s for LocationUri %s", location_arns, location_uri)
return location_arns
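# Illustrative usage sketch: DataSyncOperator driven purely by location URIs, so the operator
# searches for (and could create) a matching Task at runtime. The S3 bucket name is a
# hypothetical placeholder; the SMB URI mirrors the docstring example above.
def _example_datasync_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_datasync", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        DataSyncOperator(
            task_id="sync_smb_to_s3",
            source_location_uri="smb://server/subdir",
            destination_location_uri="s3://my-airflow-bucket/stuff",
        )
    return dag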
| 18,421 | 44.48642 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/redshift_data.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_data import RedshiftDataHook
if TYPE_CHECKING:
from mypy_boto3_redshift_data.type_defs import GetStatementResultResponseTypeDef
from airflow.utils.context import Context
class RedshiftDataOperator(BaseOperator):
"""
Executes SQL Statements against an Amazon Redshift cluster using Redshift Data.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftDataOperator`
:param database: the name of the database
:param sql: the SQL statement or list of SQL statement to run
:param cluster_identifier: unique identifier of a cluster
:param db_user: the database username
:param parameters: the parameters for the SQL statement
:param secret_arn: the name or ARN of the secret that enables db access
:param statement_name: the name of the SQL statement
:param with_event: indicates whether to send an event to EventBridge
:param wait_for_completion: indicates whether to wait for a result, if True wait, if False don't wait
:param poll_interval: how often in seconds to check the query status
:param return_sql_result: if True will return the result of an SQL statement,
if False (default) will return statement ID
:param aws_conn_id: aws connection to use
:param region: aws region to use
"""
template_fields = (
"cluster_identifier",
"database",
"sql",
"db_user",
"parameters",
"statement_name",
"aws_conn_id",
"region",
)
template_ext = (".sql",)
template_fields_renderers = {"sql": "sql"}
statement_id: str | None
def __init__(
self,
database: str,
sql: str | list,
cluster_identifier: str | None = None,
db_user: str | None = None,
parameters: list | None = None,
secret_arn: str | None = None,
statement_name: str | None = None,
with_event: bool = False,
wait_for_completion: bool = True,
poll_interval: int = 10,
return_sql_result: bool = False,
aws_conn_id: str = "aws_default",
region: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.database = database
self.sql = sql
self.cluster_identifier = cluster_identifier
self.db_user = db_user
self.parameters = parameters
self.secret_arn = secret_arn
self.statement_name = statement_name
self.with_event = with_event
self.wait_for_completion = wait_for_completion
        if poll_interval > 0:
            self.poll_interval = poll_interval
        else:
            # Log with a proper format placeholder and fall back to the documented default of
            # 10 seconds so that execute() never hits a missing attribute.
            self.log.warning("Invalid poll_interval %s, falling back to 10 seconds.", poll_interval)
            self.poll_interval = 10
self.return_sql_result = return_sql_result
self.aws_conn_id = aws_conn_id
self.region = region
self.statement_id: str | None = None
@cached_property
    def hook(self) -> RedshiftDataHook:
        """Create and return a RedshiftDataHook."""
return RedshiftDataHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
def execute(self, context: Context) -> GetStatementResultResponseTypeDef | str:
"""Execute a statement against Amazon Redshift."""
self.log.info("Executing statement: %s", self.sql)
self.statement_id = self.hook.execute_query(
database=self.database,
sql=self.sql,
cluster_identifier=self.cluster_identifier,
db_user=self.db_user,
parameters=self.parameters,
secret_arn=self.secret_arn,
statement_name=self.statement_name,
with_event=self.with_event,
wait_for_completion=self.wait_for_completion,
poll_interval=self.poll_interval,
)
if self.return_sql_result:
result = self.hook.conn.get_statement_result(Id=self.statement_id)
self.log.debug("Statement result: %s", result)
return result
else:
return self.statement_id
def on_kill(self) -> None:
"""Cancel the submitted redshift query."""
if self.statement_id:
self.log.info("Received a kill signal.")
self.log.info("Stopping Query with statementId - %s", self.statement_id)
try:
self.hook.conn.cancel_statement(Id=self.statement_id)
except Exception as ex:
self.log.error("Unable to cancel query. Exiting. %s", ex)
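# Illustrative usage sketch: running a single statement with RedshiftDataOperator and letting
# the default behaviour push the statement id to XCom. The cluster identifier, database and
# database user are hypothetical placeholders.
def _example_redshift_data_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_redshift_data", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        RedshiftDataOperator(
            task_id="run_query",
            database="dev",
            sql="SELECT 1;",
            cluster_identifier="my-redshift-cluster",
            db_user="awsuser",
        )
    return dag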
| 5,547 | 36.486486 | 105 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/lambda_function.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.lambda_function import LambdaHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LambdaCreateFunctionOperator(BaseOperator):
"""
Creates an AWS Lambda function.
More information regarding parameters of this operator can be found here
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.create_function
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LambdaCreateFunctionOperator`
:param function_name: The name of the AWS Lambda function, version, or alias.
:param runtime: The identifier of the function's runtime. Runtime is required if the deployment package
is a .zip file archive.
:param role: The Amazon Resource Name (ARN) of the function's execution role.
:param handler: The name of the method within your code that Lambda calls to run your function.
Handler is required if the deployment package is a .zip file archive.
:param code: The code for the function.
:param description: A description of the function.
:param timeout: The amount of time (in seconds) that Lambda allows a function to run before stopping it.
:param config: Optional dictionary for arbitrary parameters to the boto API create_lambda call.
:param wait_for_completion: If True, the operator will wait until the function is active.
:param aws_conn_id: The AWS connection ID to use
"""
template_fields: Sequence[str] = (
"function_name",
"runtime",
"role",
"handler",
"code",
"config",
)
ui_color = "#ff7300"
def __init__(
self,
*,
function_name: str,
runtime: str | None = None,
role: str,
handler: str | None = None,
code: dict,
description: str | None = None,
timeout: int | None = None,
        config: dict | None = None,
wait_for_completion: bool = False,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.function_name = function_name
self.runtime = runtime
self.role = role
self.handler = handler
self.code = code
self.description = description
self.timeout = timeout
        self.config = config or {}
self.wait_for_completion = wait_for_completion
self.aws_conn_id = aws_conn_id
@cached_property
def hook(self) -> LambdaHook:
return LambdaHook(aws_conn_id=self.aws_conn_id)
def execute(self, context: Context):
self.log.info("Creating AWS Lambda function: %s", self.function_name)
response = self.hook.create_lambda(
function_name=self.function_name,
runtime=self.runtime,
role=self.role,
handler=self.handler,
code=self.code,
description=self.description,
timeout=self.timeout,
**self.config,
)
self.log.info("Lambda response: %r", response)
if self.wait_for_completion:
self.log.info("Wait for Lambda function to be active")
waiter = self.hook.conn.get_waiter("function_active_v2")
waiter.wait(
FunctionName=self.function_name,
)
return response.get("FunctionArn")
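# Illustrative usage sketch: creating a function from a zip archive stored in S3 and waiting
# for it to become active. The role ARN, bucket, key and function name are hypothetical
# placeholders.
def _example_create_function_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_lambda_create", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        LambdaCreateFunctionOperator(
            task_id="create_fn",
            function_name="my-function",
            runtime="python3.9",
            role="arn:aws:iam::123456789012:role/my-lambda-role",
            handler="app.handler",
            code={"S3Bucket": "my-deploy-bucket", "S3Key": "lambda/app.zip"},
            wait_for_completion=True,
        )
    return dag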
class LambdaInvokeFunctionOperator(BaseOperator):
"""
Invokes an AWS Lambda function.
You can invoke a function synchronously (and wait for the response), or asynchronously.
To invoke a function asynchronously, set `invocation_type` to `Event`. For more details,
review the boto3 Lambda invoke docs.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LambdaInvokeFunctionOperator`
:param function_name: The name of the AWS Lambda function, version, or alias.
:param log_type: Set to Tail to include the execution log in the response. Otherwise, set to "None".
:param qualifier: Specify a version or alias to invoke a published version of the function.
:param invocation_type: AWS Lambda invocation type (RequestResponse, Event, DryRun)
:param client_context: Data about the invoking client to pass to the function in the context object
:param payload: JSON provided as input to the Lambda function
:param aws_conn_id: The AWS connection ID to use
"""
template_fields: Sequence[str] = ("function_name", "payload", "qualifier", "invocation_type")
ui_color = "#ff7300"
def __init__(
self,
*,
function_name: str,
log_type: str | None = None,
qualifier: str | None = None,
invocation_type: str | None = None,
client_context: str | None = None,
payload: bytes | str | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.function_name = function_name
self.payload = payload
self.log_type = log_type
self.qualifier = qualifier
self.invocation_type = invocation_type
self.client_context = client_context
self.aws_conn_id = aws_conn_id
@cached_property
def hook(self) -> LambdaHook:
return LambdaHook(aws_conn_id=self.aws_conn_id)
def execute(self, context: Context):
"""
Invokes the target AWS Lambda function from Airflow.
:return: The response payload from the function, or an error object.
"""
success_status_codes = [200, 202, 204]
self.log.info("Invoking AWS Lambda function: %s with payload: %s", self.function_name, self.payload)
response = self.hook.invoke_lambda(
function_name=self.function_name,
invocation_type=self.invocation_type,
log_type=self.log_type,
client_context=self.client_context,
payload=self.payload,
qualifier=self.qualifier,
)
self.log.info("Lambda response metadata: %r", response.get("ResponseMetadata"))
if response.get("StatusCode") not in success_status_codes:
raise ValueError("Lambda function did not execute", json.dumps(response.get("ResponseMetadata")))
payload_stream = response.get("Payload")
payload = payload_stream.read().decode()
if "FunctionError" in response:
raise ValueError(
"Lambda function execution resulted in error",
{"ResponseMetadata": response.get("ResponseMetadata"), "Payload": payload},
)
self.log.info("Lambda function invocation succeeded: %r", response.get("ResponseMetadata"))
return payload
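# Illustrative usage sketch: a synchronous invocation whose decoded payload is returned (and,
# with do_xcom_push enabled, lands in XCom). The function name and payload are hypothetical
# placeholders.
def _example_invoke_function_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_lambda_invoke", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        LambdaInvokeFunctionOperator(
            task_id="invoke_fn",
            function_name="my-function",
            invocation_type="RequestResponse",
            payload='{"key": "value"}',
        )
    return dag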
| 7,749 | 38.141414 | 120 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/ecs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
import sys
import warnings
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
import boto3
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator, XCom
from airflow.providers.amazon.aws.exceptions import EcsOperatorError, EcsTaskFailToStart
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.ecs import EcsClusterStates, EcsHook, should_retry_eni
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.providers.amazon.aws.triggers.ecs import (
ClusterActiveTrigger,
ClusterInactiveTrigger,
TaskDoneTrigger,
)
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher
from airflow.utils.helpers import prune_dict
from airflow.utils.session import provide_session
if TYPE_CHECKING:
from airflow.utils.context import Context
DEFAULT_CONN_ID = "aws_default"
class EcsBaseOperator(BaseOperator):
"""This is the base operator for all Elastic Container Service operators."""
def __init__(self, *, aws_conn_id: str | None = DEFAULT_CONN_ID, region: str | None = None, **kwargs):
self.aws_conn_id = aws_conn_id
self.region = region
super().__init__(**kwargs)
@cached_property
def hook(self) -> EcsHook:
"""Create and return an EcsHook."""
return EcsHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
@cached_property
def client(self) -> boto3.client:
"""Create and return the EcsHook's client."""
return self.hook.conn
    def execute(self, context: Context):
        """Must be overridden by child classes."""
        raise NotImplementedError("Please implement execute() in subclass")
def _complete_exec_with_cluster_desc(self, context, event=None):
"""To be used as trigger callback for operators that return the cluster description."""
if event["status"] != "success":
raise AirflowException(f"Error while waiting for operation on cluster to complete: {event}")
cluster_arn = event.get("arn")
# We cannot get the cluster definition from the waiter on success, so we have to query it here.
details = self.hook.conn.describe_clusters(clusters=[cluster_arn])["clusters"][0]
return details
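# Illustrative sketch: EcsBaseOperator is meant to be subclassed, with `hook` and `client`
# giving subclasses a shared EcsHook and boto3 ECS client. The operator below is hypothetical
# and simply returns the ARNs of all clusters visible to the connection.
class _ExampleListClustersOperator(EcsBaseOperator):
    def execute(self, context: Context):
        return self.client.list_clusters()["clusterArns"]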
class EcsCreateClusterOperator(EcsBaseOperator):
"""
Creates an AWS ECS cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsCreateClusterOperator`
:param cluster_name: The name of your cluster. If you don't specify a name for your
cluster, you create a cluster that's named default.
:param create_cluster_kwargs: Extra arguments for Cluster Creation.
:param wait_for_completion: If True, waits for creation of the cluster to complete. (default: True)
:param waiter_delay: The amount of time in seconds to wait between attempts,
if not set then the default waiter value will be used.
:param waiter_max_attempts: The maximum number of attempts to be made,
if not set then the default waiter value will be used.
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = (
"cluster_name",
"create_cluster_kwargs",
"wait_for_completion",
"deferrable",
)
def __init__(
self,
*,
cluster_name: str,
create_cluster_kwargs: dict | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 15,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.create_cluster_kwargs = create_cluster_kwargs or {}
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context):
self.log.info(
"Creating cluster %r using the following values: %s",
self.cluster_name,
self.create_cluster_kwargs,
)
result = self.client.create_cluster(clusterName=self.cluster_name, **self.create_cluster_kwargs)
cluster_details = result["cluster"]
cluster_state = cluster_details.get("status")
if cluster_state == EcsClusterStates.ACTIVE:
# In some circumstances the ECS Cluster is created immediately,
# and there is no reason to wait for completion.
self.log.info("Cluster %r in state: %r.", self.cluster_name, cluster_state)
elif self.deferrable:
self.defer(
trigger=ClusterActiveTrigger(
cluster_arn=cluster_details["clusterArn"],
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
),
method_name="_complete_exec_with_cluster_desc",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
elif self.wait_for_completion:
waiter = self.hook.get_waiter("cluster_active")
waiter.wait(
clusters=[cluster_details["clusterArn"]],
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
return cluster_details
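# Illustrative usage sketch: creating a cluster and waiting (synchronously, with the default
# waiter settings) for it to become ACTIVE. The cluster name is a hypothetical placeholder.
def _example_create_cluster_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_ecs_create_cluster", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        EcsCreateClusterOperator(
            task_id="create_cluster",
            cluster_name="my-ecs-cluster",
            wait_for_completion=True,
        )
    return dag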
class EcsDeleteClusterOperator(EcsBaseOperator):
"""
Deletes an AWS ECS cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsDeleteClusterOperator`
:param cluster_name: The short name or full Amazon Resource Name (ARN) of the cluster to delete.
    :param wait_for_completion: If True, waits for deletion of the cluster to complete. (default: True)
:param waiter_delay: The amount of time in seconds to wait between attempts,
if not set then the default waiter value will be used.
:param waiter_max_attempts: The maximum number of attempts to be made,
if not set then the default waiter value will be used.
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = ("cluster_name", "wait_for_completion", "deferrable")
def __init__(
self,
*,
cluster_name: str,
wait_for_completion: bool = True,
waiter_delay: int = 15,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context):
self.log.info("Deleting cluster %r.", self.cluster_name)
result = self.client.delete_cluster(cluster=self.cluster_name)
cluster_details = result["cluster"]
cluster_state = cluster_details.get("status")
if cluster_state == EcsClusterStates.INACTIVE:
# if the cluster doesn't have capacity providers that are associated with it,
# the deletion is instantaneous, and we don't need to wait for it.
self.log.info("Cluster %r in state: %r.", self.cluster_name, cluster_state)
elif self.deferrable:
self.defer(
trigger=ClusterInactiveTrigger(
cluster_arn=cluster_details["clusterArn"],
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
),
method_name="_complete_exec_with_cluster_desc",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
elif self.wait_for_completion:
waiter = self.hook.get_waiter("cluster_inactive")
waiter.wait(
clusters=[cluster_details["clusterArn"]],
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
return cluster_details
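# Illustrative usage sketch: a create-then-delete lifecycle chaining the two operators defined
# above. The cluster name is a hypothetical placeholder.
def _example_cluster_lifecycle_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_ecs_cluster_lifecycle", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        create = EcsCreateClusterOperator(task_id="create_cluster", cluster_name="my-ecs-cluster")
        delete = EcsDeleteClusterOperator(task_id="delete_cluster", cluster_name="my-ecs-cluster")
        create >> delete
    return dag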
class EcsDeregisterTaskDefinitionOperator(EcsBaseOperator):
"""
Deregister a task definition on AWS ECS.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsDeregisterTaskDefinitionOperator`
:param task_definition: The family and revision (family:revision) or full Amazon Resource Name (ARN)
of the task definition to deregister. If you use a family name, you must specify a revision.
"""
    template_fields: Sequence[str] = ("task_definition",)
def __init__(
self,
*,
task_definition: str,
**kwargs,
):
if "wait_for_completion" in kwargs or "waiter_delay" in kwargs or "waiter_max_attempts" in kwargs:
warnings.warn(
"'wait_for_completion' and waiter related params have no effect and are deprecated, "
"please remove them.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
# remove args to not trigger Invalid arguments exception
kwargs.pop("wait_for_completion", None)
kwargs.pop("waiter_delay", None)
kwargs.pop("waiter_max_attempts", None)
super().__init__(**kwargs)
self.task_definition = task_definition
def execute(self, context: Context):
self.log.info("Deregistering task definition %s.", self.task_definition)
result = self.client.deregister_task_definition(taskDefinition=self.task_definition)
task_definition_details = result["taskDefinition"]
task_definition_arn = task_definition_details["taskDefinitionArn"]
self.log.info(
"Task Definition %r in state: %r.", task_definition_arn, task_definition_details.get("status")
)
return task_definition_arn
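# Illustrative usage sketch: deregistering a specific revision of a task definition. The
# family name and revision are hypothetical placeholders.
def _example_deregister_task_definition_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_ecs_deregister_task_def", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        EcsDeregisterTaskDefinitionOperator(
            task_id="deregister_task_def",
            task_definition="my-task-family:1",
        )
    return dag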
class EcsRegisterTaskDefinitionOperator(EcsBaseOperator):
"""
Register a task definition on AWS ECS.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsRegisterTaskDefinitionOperator`
:param family: The family name of a task definition to create.
:param container_definitions: A list of container definitions in JSON format that describe
the different containers that make up your task.
:param register_task_kwargs: Extra arguments for Register Task Definition.
"""
template_fields: Sequence[str] = (
"family",
"container_definitions",
"register_task_kwargs",
)
def __init__(
self,
*,
family: str,
container_definitions: list[dict],
register_task_kwargs: dict | None = None,
**kwargs,
):
if "wait_for_completion" in kwargs or "waiter_delay" in kwargs or "waiter_max_attempts" in kwargs:
warnings.warn(
"'wait_for_completion' and waiter related params have no effect and are deprecated, "
"please remove them.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
# remove args to not trigger Invalid arguments exception
kwargs.pop("wait_for_completion", None)
kwargs.pop("waiter_delay", None)
kwargs.pop("waiter_max_attempts", None)
super().__init__(**kwargs)
self.family = family
self.container_definitions = container_definitions
self.register_task_kwargs = register_task_kwargs or {}
def execute(self, context: Context):
self.log.info(
"Registering task definition %s using the following values: %s",
self.family,
self.register_task_kwargs,
)
self.log.info("Using container definition %s", self.container_definitions)
response = self.client.register_task_definition(
family=self.family,
containerDefinitions=self.container_definitions,
**self.register_task_kwargs,
)
task_definition_details = response["taskDefinition"]
task_definition_arn = task_definition_details["taskDefinitionArn"]
self.log.info(
"Task Definition %r in state: %r.", task_definition_arn, task_definition_details.get("status")
)
context["ti"].xcom_push(key="task_definition_arn", value=task_definition_arn)
return task_definition_arn
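# Illustrative usage sketch: registering a minimal single-container task definition. The
# family name, container name and image are hypothetical placeholders; as execute() above
# shows, the resulting ARN is also pushed to XCom under "task_definition_arn".
def _example_register_task_definition_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG("example_ecs_register_task_def", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        EcsRegisterTaskDefinitionOperator(
            task_id="register_task_def",
            family="my-task-family",
            container_definitions=[
                {
                    "name": "hello",
                    "image": "public.ecr.aws/docker/library/busybox:latest",
                    "memory": 128,
                    "command": ["echo", "hello"],
                }
            ],
            register_task_kwargs={"requiresCompatibilities": ["EC2"]},
        )
    return dag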
class EcsRunTaskOperator(EcsBaseOperator):
"""
Execute a task on AWS ECS (Elastic Container Service).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EcsRunTaskOperator`
:param task_definition: the task definition name on Elastic Container Service
:param cluster: the cluster name on Elastic Container Service
:param overrides: the same parameter that boto3 will receive (templated):
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html).
:param region: region name to use in AWS Hook.
Override the region in connection (if provided)
:param launch_type: the launch type on which to run your task ('EC2', 'EXTERNAL', or 'FARGATE')
:param capacity_provider_strategy: the capacity provider strategy to use for the task.
When capacity_provider_strategy is specified, the launch_type parameter is omitted.
If no capacity_provider_strategy or launch_type is specified,
the default capacity provider strategy for the cluster is used.
:param group: the name of the task group associated with the task
:param placement_constraints: an array of placement constraint objects to use for
the task
:param placement_strategy: an array of placement strategy objects to use for
the task
:param platform_version: the platform version on which your task is running
:param network_configuration: the network configuration for the task
:param tags: a dictionary of tags in the form of {'tagKey': 'tagValue'}.
:param awslogs_group: the CloudWatch group where your ECS container logs are stored.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:param awslogs_region: the region in which your CloudWatch logs are stored.
If None, this is the same as the `region` parameter. If that is also None,
this is the default AWS region based on your connection settings.
:param awslogs_stream_prefix: the stream prefix that is used for the CloudWatch logs.
This is usually based on some custom name combined with the name of the container.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:param awslogs_fetch_interval: the interval that the ECS task log fetcher should wait
in between each Cloudwatch logs fetches.
If deferrable is set to True, that parameter is ignored and waiter_delay is used instead.
:param quota_retry: Config if and how to retry the launch of a new ECS task, to handle
transient errors.
:param reattach: If set to True, will check if the task previously launched by the task_instance
is already running. If so, the operator will attach to it instead of starting a new task.
This is to avoid relaunching a new task when the connection drops between Airflow and ECS while
the task is running (when the Airflow worker is restarted for example).
:param number_logs_exception: Number of lines from the last Cloudwatch logs to return in the
AirflowException if an ECS task is stopped (to receive Airflow alerts with the logs of what
failed in the code running in ECS).
    :param wait_for_completion: If True, waits for the task to complete. (default: True)
:param waiter_delay: The amount of time in seconds to wait between attempts,
if not set then the default waiter value will be used.
:param waiter_max_attempts: The maximum number of attempts to be made,
if not set then the default waiter value will be used.
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
ui_color = "#f0ede4"
template_fields: Sequence[str] = (
"task_definition",
"cluster",
"overrides",
"launch_type",
"capacity_provider_strategy",
"group",
"placement_constraints",
"placement_strategy",
"platform_version",
"network_configuration",
"tags",
"awslogs_group",
"awslogs_region",
"awslogs_stream_prefix",
"awslogs_fetch_interval",
"propagate_tags",
"reattach",
"number_logs_exception",
"wait_for_completion",
"deferrable",
)
template_fields_renderers = {
"overrides": "json",
"network_configuration": "json",
"tags": "json",
}
REATTACH_XCOM_KEY = "ecs_task_arn"
REATTACH_XCOM_TASK_ID_TEMPLATE = "{task_id}_task_arn"
def __init__(
self,
*,
task_definition: str,
cluster: str,
overrides: dict,
launch_type: str = "EC2",
capacity_provider_strategy: list | None = None,
group: str | None = None,
placement_constraints: list | None = None,
placement_strategy: list | None = None,
platform_version: str | None = None,
network_configuration: dict | None = None,
tags: dict | None = None,
awslogs_group: str | None = None,
awslogs_region: str | None = None,
awslogs_stream_prefix: str | None = None,
awslogs_fetch_interval: timedelta = timedelta(seconds=30),
propagate_tags: str | None = None,
quota_retry: dict | None = None,
reattach: bool = False,
number_logs_exception: int = 10,
wait_for_completion: bool = True,
waiter_delay: int = 6,
waiter_max_attempts: int = 100,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.task_definition = task_definition
self.cluster = cluster
self.overrides = overrides
self.launch_type = launch_type
self.capacity_provider_strategy = capacity_provider_strategy
self.group = group
self.placement_constraints = placement_constraints
self.placement_strategy = placement_strategy
self.platform_version = platform_version
self.network_configuration = network_configuration
self.tags = tags
self.awslogs_group = awslogs_group
self.awslogs_stream_prefix = awslogs_stream_prefix
self.awslogs_region = awslogs_region
self.awslogs_fetch_interval = awslogs_fetch_interval
self.propagate_tags = propagate_tags
self.reattach = reattach
self.number_logs_exception = number_logs_exception
if self.awslogs_region is None:
self.awslogs_region = self.region
self.arn: str | None = None
self.retry_args = quota_retry
self.task_log_fetcher: AwsTaskLogFetcher | None = None
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
if self._aws_logs_enabled() and not self.wait_for_completion:
self.log.warning(
"Trying to get logs without waiting for the task to complete is undefined behavior."
)
@staticmethod
def _get_ecs_task_id(task_arn: str | None) -> str | None:
if task_arn is None:
return None
return task_arn.split("/")[-1]
@provide_session
def execute(self, context, session=None):
self.log.info(
"Running ECS Task - Task definition: %s - on cluster %s", self.task_definition, self.cluster
)
self.log.info("EcsOperator overrides: %s", self.overrides)
if self.reattach:
self._try_reattach_task(context)
if not self.arn:
# start the task except if we reattached to an existing one just before.
self._start_task(context)
if self.deferrable:
self.defer(
trigger=TaskDoneTrigger(
cluster=self.cluster,
task_arn=self.arn,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region=self.region,
log_group=self.awslogs_group,
log_stream=self._get_logs_stream_name(),
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
# self.defer raises a special exception, so execution stops here in this case.
if not self.wait_for_completion:
return
if self._aws_logs_enabled():
self.log.info("Starting ECS Task Log Fetcher")
self.task_log_fetcher = self._get_task_log_fetcher()
self.task_log_fetcher.start()
try:
self._wait_for_task_ended()
finally:
self.task_log_fetcher.stop()
self.task_log_fetcher.join()
else:
self._wait_for_task_ended()
self._after_execution(session)
if self.do_xcom_push and self.task_log_fetcher:
return self.task_log_fetcher.get_last_log_message()
else:
return None
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error in task execution: {event}")
self.arn = event["task_arn"] # restore arn to its updated value, needed for next steps
self._after_execution()
if self._aws_logs_enabled():
# same behavior as non-deferrable mode, return last line of logs of the task.
logs_client = AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.region).conn
one_log = logs_client.get_log_events(
logGroupName=self.awslogs_group,
logStreamName=self._get_logs_stream_name(),
startFromHead=False,
limit=1,
)
if len(one_log["events"]) > 0:
return one_log["events"][0]["message"]
@provide_session
def _after_execution(self, session=None):
self._check_success_task()
self.log.info("ECS Task has been successfully executed")
if self.reattach:
# Clear the XCom value storing the ECS task ARN if the task has completed
# as we can't reattach it anymore
self._xcom_del(session, self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id))
def _xcom_del(self, session, task_id):
session.query(XCom).filter(XCom.dag_id == self.dag_id, XCom.task_id == task_id).delete()
@AwsBaseHook.retry(should_retry_eni)
def _start_task(self, context):
run_opts = {
"cluster": self.cluster,
"taskDefinition": self.task_definition,
"overrides": self.overrides,
"startedBy": self.owner,
}
if self.capacity_provider_strategy:
run_opts["capacityProviderStrategy"] = self.capacity_provider_strategy
elif self.launch_type:
run_opts["launchType"] = self.launch_type
if self.platform_version is not None:
run_opts["platformVersion"] = self.platform_version
if self.group is not None:
run_opts["group"] = self.group
if self.placement_constraints is not None:
run_opts["placementConstraints"] = self.placement_constraints
if self.placement_strategy is not None:
run_opts["placementStrategy"] = self.placement_strategy
if self.network_configuration is not None:
run_opts["networkConfiguration"] = self.network_configuration
if self.tags is not None:
run_opts["tags"] = [{"key": k, "value": v} for (k, v) in self.tags.items()]
if self.propagate_tags is not None:
run_opts["propagateTags"] = self.propagate_tags
response = self.client.run_task(**run_opts)
failures = response["failures"]
if len(failures) > 0:
raise EcsOperatorError(failures, response)
self.log.info("ECS Task started: %s", response)
self.arn = response["tasks"][0]["taskArn"]
self.log.info("ECS task ID is: %s", self._get_ecs_task_id(self.arn))
if self.reattach:
# Save the task ARN in XCom to be able to reattach it if needed
self.xcom_push(context, key=self.REATTACH_XCOM_KEY, value=self.arn)
def _try_reattach_task(self, context):
task_def_resp = self.client.describe_task_definition(taskDefinition=self.task_definition)
ecs_task_family = task_def_resp["taskDefinition"]["family"]
list_tasks_resp = self.client.list_tasks(
cluster=self.cluster, desiredStatus="RUNNING", family=ecs_task_family
)
running_tasks = list_tasks_resp["taskArns"]
# Check if the ECS task previously launched is already running
previous_task_arn = self.xcom_pull(
context,
task_ids=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
key=self.REATTACH_XCOM_KEY,
)
if previous_task_arn in running_tasks:
self.arn = previous_task_arn
self.log.info("Reattaching previously launched task: %s", self.arn)
else:
self.log.info("No active previously launched task found to reattach")
def _wait_for_task_ended(self) -> None:
if not self.client or not self.arn:
return
waiter = self.client.get_waiter("tasks_stopped")
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(
cluster=self.cluster,
tasks=[self.arn],
WaiterConfig={
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
},
)
return
def _aws_logs_enabled(self):
return self.awslogs_group and self.awslogs_stream_prefix
def _get_logs_stream_name(self) -> str:
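        # Illustrative only: with awslogs_stream_prefix "ecs/example-container" and a task id
        # of "abc123", this resolves to the log stream "ecs/example-container/abc123".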
return f"{self.awslogs_stream_prefix}/{self._get_ecs_task_id(self.arn)}"
def _get_task_log_fetcher(self) -> AwsTaskLogFetcher:
if not self.awslogs_group:
raise ValueError("must specify awslogs_group to fetch task logs")
return AwsTaskLogFetcher(
aws_conn_id=self.aws_conn_id,
region_name=self.awslogs_region,
log_group=self.awslogs_group,
log_stream_name=self._get_logs_stream_name(),
fetch_interval=self.awslogs_fetch_interval,
logger=self.log,
)
@AwsBaseHook.retry(should_retry_eni)
def _check_success_task(self) -> None:
if not self.client or not self.arn:
return
response = self.client.describe_tasks(cluster=self.cluster, tasks=[self.arn])
self.log.info("ECS Task stopped, check status: %s", response)
if len(response.get("failures", [])) > 0:
raise AirflowException(response)
for task in response["tasks"]:
if task.get("stopCode", "") == "TaskFailedToStart":
# Reset task arn here otherwise the retry run will not start
# a new task but keep polling the old dead one
# I'm not resetting it for other exceptions here because
# EcsTaskFailToStart is the only exception that's being retried at the moment
self.arn = None
raise EcsTaskFailToStart(f"The task failed to start due to: {task.get('stoppedReason', '')}")
# This is a `stoppedReason` that indicates a task has not
# successfully finished, but there is no other indication of failure
# in the response.
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/stopped-task-errors.html
if re.match(r"Host EC2 \(instance .+?\) (stopped|terminated)\.", task.get("stoppedReason", "")):
raise AirflowException(
f"The task was stopped because the host instance terminated:"
f" {task.get('stoppedReason', '')}"
)
containers = task["containers"]
for container in containers:
if container.get("lastStatus") == "STOPPED" and container.get("exitCode", 1) != 0:
if self.task_log_fetcher:
last_logs = "\n".join(
self.task_log_fetcher.get_last_log_messages(self.number_logs_exception)
)
raise AirflowException(
f"This task is not in success state - last {self.number_logs_exception} "
f"logs from Cloudwatch:\n{last_logs}"
)
else:
raise AirflowException(f"This task is not in success state {task}")
elif container.get("lastStatus") == "PENDING":
raise AirflowException(f"This task is still pending {task}")
elif "error" in container.get("reason", "").lower():
                    raise AirflowException(
                        f"This container encountered an error during launching: "
                        f"{container.get('reason', '').lower()}"
                    )
def on_kill(self) -> None:
if not self.client or not self.arn:
return
if self.task_log_fetcher:
self.task_log_fetcher.stop()
response = self.client.stop_task(
cluster=self.cluster, task=self.arn, reason="Task killed by the user"
)
self.log.info(response)
| 33,315 | 41.988387 | 111 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/cloud_formation.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains CloudFormation create/delete stack operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.cloud_formation import CloudFormationHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudFormationCreateStackOperator(BaseOperator):
"""
An operator that creates a CloudFormation stack.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFormationCreateStackOperator`
:param stack_name: stack name (templated)
:param cloudformation_parameters: parameters to be passed to CloudFormation.
    :param aws_conn_id: aws connection to use
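    A minimal usage sketch, assuming ``cloudformation_parameters`` maps directly onto the
    boto3 ``create_stack`` arguments; the stack name and template URL below are placeholders:
    .. code-block:: python
        create_stack = CloudFormationCreateStackOperator(
            task_id="create_stack",
            stack_name="example-stack",
            cloudformation_parameters={
                "TemplateURL": "https://example-bucket.s3.amazonaws.com/template.yaml",
                "TimeoutInMinutes": 15,
            },
        )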
"""
template_fields: Sequence[str] = ("stack_name", "cloudformation_parameters")
template_ext: Sequence[str] = ()
ui_color = "#6b9659"
def __init__(
self, *, stack_name: str, cloudformation_parameters: dict, aws_conn_id: str = "aws_default", **kwargs
):
super().__init__(**kwargs)
self.stack_name = stack_name
self.cloudformation_parameters = cloudformation_parameters
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
self.log.info("CloudFormation parameters: %s", self.cloudformation_parameters)
cloudformation_hook = CloudFormationHook(aws_conn_id=self.aws_conn_id)
cloudformation_hook.create_stack(self.stack_name, self.cloudformation_parameters)
class CloudFormationDeleteStackOperator(BaseOperator):
"""
An operator that deletes a CloudFormation stack.
:param stack_name: stack name (templated)
:param cloudformation_parameters: parameters to be passed to CloudFormation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFormationDeleteStackOperator`
    :param aws_conn_id: aws connection to use
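    A minimal usage sketch; the stack name below is a placeholder:
    .. code-block:: python
        delete_stack = CloudFormationDeleteStackOperator(
            task_id="delete_stack",
            stack_name="example-stack",
        )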
"""
template_fields: Sequence[str] = ("stack_name",)
template_ext: Sequence[str] = ()
ui_color = "#1d472b"
ui_fgcolor = "#FFF"
def __init__(
self,
*,
stack_name: str,
cloudformation_parameters: dict | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.cloudformation_parameters = cloudformation_parameters or {}
self.stack_name = stack_name
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
self.log.info("CloudFormation Parameters: %s", self.cloudformation_parameters)
cloudformation_hook = CloudFormationHook(aws_conn_id=self.aws_conn_id)
cloudformation_hook.delete_stack(self.stack_name, self.cloudformation_parameters)
| 3,633 | 35.707071 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/ec2.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.ec2 import EC2Hook
if TYPE_CHECKING:
from airflow.utils.context import Context
class EC2StartInstanceOperator(BaseOperator):
"""
Start AWS EC2 instance using boto3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EC2StartInstanceOperator`
:param instance_id: id of the AWS EC2 instance
:param aws_conn_id: aws connection to use
:param region_name: (optional) aws region name associated with the client
:param check_interval: time in seconds that the job should wait in
        between each instance state check until the operation is completed
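    A minimal usage sketch; the instance id below is a placeholder:
    .. code-block:: python
        start_instance = EC2StartInstanceOperator(
            task_id="start_instance",
            instance_id="i-0123456789abcdef0",
        )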
"""
template_fields: Sequence[str] = ("instance_id", "region_name")
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
instance_id: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
check_interval: float = 15,
**kwargs,
):
super().__init__(**kwargs)
self.instance_id = instance_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.check_interval = check_interval
def execute(self, context: Context):
ec2_hook = EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
self.log.info("Starting EC2 instance %s", self.instance_id)
instance = ec2_hook.get_instance(instance_id=self.instance_id)
instance.start()
ec2_hook.wait_for_state(
instance_id=self.instance_id,
target_state="running",
check_interval=self.check_interval,
)
class EC2StopInstanceOperator(BaseOperator):
"""
Stop AWS EC2 instance using boto3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EC2StopInstanceOperator`
:param instance_id: id of the AWS EC2 instance
:param aws_conn_id: aws connection to use
:param region_name: (optional) aws region name associated with the client
:param check_interval: time in seconds that the job should wait in
        between each instance state check until the operation is completed
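    A minimal usage sketch; the instance id below is a placeholder:
    .. code-block:: python
        stop_instance = EC2StopInstanceOperator(
            task_id="stop_instance",
            instance_id="i-0123456789abcdef0",
        )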
"""
template_fields: Sequence[str] = ("instance_id", "region_name")
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
def __init__(
self,
*,
instance_id: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
check_interval: float = 15,
**kwargs,
):
super().__init__(**kwargs)
self.instance_id = instance_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.check_interval = check_interval
def execute(self, context: Context):
ec2_hook = EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
self.log.info("Stopping EC2 instance %s", self.instance_id)
instance = ec2_hook.get_instance(instance_id=self.instance_id)
instance.stop()
ec2_hook.wait_for_state(
instance_id=self.instance_id,
target_state="stopped",
check_interval=self.check_interval,
)
class EC2CreateInstanceOperator(BaseOperator):
"""
Create and start a specified number of EC2 Instances using boto3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EC2CreateInstanceOperator`
:param image_id: ID of the AMI used to create the instance.
:param max_count: Maximum number of instances to launch. Defaults to 1.
:param min_count: Minimum number of instances to launch. Defaults to 1.
:param aws_conn_id: AWS connection to use
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
:param max_attempts: Maximum number of attempts when checking state of instance.
Only used if wait_for_completion is True. Default is 20.
:param config: Dictionary for arbitrary parameters to the boto3 run_instances call.
:param wait_for_completion: If True, the operator will wait for the instance to be
in the `running` state before returning.
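    A minimal usage sketch, assuming ``config`` is passed through to the boto3 ``run_instances``
    call; the AMI id and instance type below are placeholders:
    .. code-block:: python
        create_instance = EC2CreateInstanceOperator(
            task_id="create_instance",
            image_id="ami-0123456789abcdef0",
            config={"InstanceType": "t3.micro"},
            wait_for_completion=True,
        )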
"""
template_fields: Sequence[str] = (
"image_id",
"max_count",
"min_count",
"aws_conn_id",
"region_name",
"config",
"wait_for_completion",
)
def __init__(
self,
image_id: str,
max_count: int = 1,
min_count: int = 1,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
config: dict | None = None,
wait_for_completion: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.image_id = image_id
self.max_count = max_count
self.min_count = min_count
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.poll_interval = poll_interval
self.max_attempts = max_attempts
self.config = config or {}
self.wait_for_completion = wait_for_completion
def execute(self, context: Context):
ec2_hook = EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name, api_type="client_type")
instances = ec2_hook.conn.run_instances(
ImageId=self.image_id,
MinCount=self.min_count,
MaxCount=self.max_count,
**self.config,
)["Instances"]
instance_ids = []
for instance in instances:
instance_ids.append(instance["InstanceId"])
self.log.info("Created EC2 instance %s", instance["InstanceId"])
if self.wait_for_completion:
ec2_hook.get_waiter("instance_running").wait(
InstanceIds=[instance["InstanceId"]],
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempts,
},
)
return instance_ids
class EC2TerminateInstanceOperator(BaseOperator):
"""
Terminate EC2 Instances using boto3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EC2TerminateInstanceOperator`
:param instance_id: ID of the instance to be terminated.
:param aws_conn_id: AWS connection to use
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
:param max_attempts: Maximum number of attempts when checking state of instance.
Only used if wait_for_completion is True. Default is 20.
:param wait_for_completion: If True, the operator will wait for the instance to be
in the `terminated` state before returning.
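    A minimal usage sketch; the instance id below is a placeholder:
    .. code-block:: python
        terminate_instance = EC2TerminateInstanceOperator(
            task_id="terminate_instance",
            instance_ids="i-0123456789abcdef0",
            wait_for_completion=True,
        )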
"""
template_fields: Sequence[str] = ("instance_ids", "region_name", "aws_conn_id", "wait_for_completion")
def __init__(
self,
instance_ids: str | list[str],
aws_conn_id: str = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
wait_for_completion: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.instance_ids = instance_ids
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.poll_interval = poll_interval
self.max_attempts = max_attempts
self.wait_for_completion = wait_for_completion
def execute(self, context: Context):
if isinstance(self.instance_ids, str):
self.instance_ids = [self.instance_ids]
ec2_hook = EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name, api_type="client_type")
ec2_hook.conn.terminate_instances(InstanceIds=self.instance_ids)
for instance_id in self.instance_ids:
self.log.info("Terminating EC2 instance %s", instance_id)
if self.wait_for_completion:
ec2_hook.get_waiter("instance_terminated").wait(
InstanceIds=[instance_id],
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempts,
},
)
| 9,591 | 36.322957 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/sqs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Publish message to SQS queue."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.sqs import SqsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SqsPublishOperator(BaseOperator):
"""
Publish a message to an Amazon SQS queue.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SqsPublishOperator`
:param sqs_queue: The SQS queue url (templated)
:param message_content: The message content (templated)
:param message_attributes: additional attributes for the message (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param delay_seconds: message delay (templated) (default: 1 second)
:param message_group_id: This parameter applies only to FIFO (first-in-first-out) queues. (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param aws_conn_id: AWS connection id (default: aws_default)
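    A minimal usage sketch; the queue URL and message below are placeholders:
    .. code-block:: python
        publish_message = SqsPublishOperator(
            task_id="publish_message",
            sqs_queue="https://sqs.us-east-1.amazonaws.com/123456789012/example-queue",
            message_content="hello from Airflow",
        )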
"""
template_fields: Sequence[str] = (
"sqs_queue",
"message_content",
"delay_seconds",
"message_attributes",
"message_group_id",
)
template_fields_renderers = {"message_attributes": "json"}
ui_color = "#6ad3fa"
def __init__(
self,
*,
sqs_queue: str,
message_content: str,
message_attributes: dict | None = None,
delay_seconds: int = 0,
message_group_id: str | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.sqs_queue = sqs_queue
self.aws_conn_id = aws_conn_id
self.message_content = message_content
self.delay_seconds = delay_seconds
self.message_attributes = message_attributes or {}
self.message_group_id = message_group_id
def execute(self, context: Context) -> dict:
"""
Publish the message to the Amazon SQS queue.
:param context: the context object
:return: dict with information about the message sent
For details of the returned dict see :py:meth:`botocore.client.SQS.send_message`
"""
hook = SqsHook(aws_conn_id=self.aws_conn_id)
result = hook.send_message(
queue_url=self.sqs_queue,
message_body=self.message_content,
delay_seconds=self.delay_seconds,
message_attributes=self.message_attributes,
message_group_id=self.message_group_id,
)
self.log.info("send_message result: %s", result)
return result
| 3,565 | 35.762887 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/glue_crawler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from airflow import AirflowException
from airflow.configuration import conf
from airflow.providers.amazon.aws.triggers.glue_crawler import GlueCrawlerCompleteTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook
class GlueCrawlerOperator(BaseOperator):
"""
Creates, updates and triggers an AWS Glue Crawler.
AWS Glue Crawler is a serverless service that manages a catalog of
metadata tables that contain the inferred schema, format and data
types of data stores within the AWS cloud.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlueCrawlerOperator`
:param config: Configurations for the AWS Glue crawler
:param aws_conn_id: aws connection to use
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status
:param wait_for_completion: Whether to wait for crawl execution completion. (default: True)
:param deferrable: If True, the operator will wait asynchronously for the crawl to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
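    A minimal usage sketch, assuming the ``config`` keys follow the Glue ``CreateCrawler`` API;
    the role ARN, database name and S3 path below are placeholders:
    .. code-block:: python
        crawl_data = GlueCrawlerOperator(
            task_id="crawl_data",
            config={
                "Name": "example-crawler",
                "Role": "arn:aws:iam::123456789012:role/example-glue-role",
                "DatabaseName": "example_db",
                "Targets": {"S3Targets": [{"Path": "s3://example-bucket/data/"}]},
            },
        )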
"""
template_fields: Sequence[str] = ("config",)
ui_color = "#ededed"
def __init__(
self,
config,
aws_conn_id="aws_default",
region_name: str | None = None,
poll_interval: int = 5,
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.poll_interval = poll_interval
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.region_name = region_name
self.config = config
@cached_property
def hook(self) -> GlueCrawlerHook:
"""Create and return a GlueCrawlerHook."""
return GlueCrawlerHook(self.aws_conn_id, region_name=self.region_name)
def execute(self, context: Context):
"""
Executes AWS Glue Crawler from Airflow.
:return: the name of the current glue crawler.
"""
crawler_name = self.config["Name"]
if self.hook.has_crawler(crawler_name):
self.hook.update_crawler(**self.config)
else:
self.hook.create_crawler(**self.config)
self.log.info("Triggering AWS Glue Crawler")
self.hook.start_crawler(crawler_name)
if self.deferrable:
self.defer(
trigger=GlueCrawlerCompleteTrigger(
crawler_name=crawler_name,
waiter_delay=self.poll_interval,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.log.info("Waiting for AWS Glue Crawler")
self.hook.wait_for_crawler_completion(crawler_name=crawler_name, poll_interval=self.poll_interval)
return crawler_name
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error in glue crawl: {event}")
return self.config["Name"]
| 4,348 | 37.149123 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/eks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Amazon EKS operators."""
from __future__ import annotations
import logging
import warnings
from ast import literal_eval
from datetime import timedelta
from typing import TYPE_CHECKING, Any, List, Sequence, cast
from botocore.exceptions import ClientError, WaiterError
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.eks import EksHook
from airflow.providers.amazon.aws.triggers.eks import (
EksCreateClusterTrigger,
EksCreateFargateProfileTrigger,
EksCreateNodegroupTrigger,
EksDeleteClusterTrigger,
EksDeleteFargateProfileTrigger,
EksDeleteNodegroupTrigger,
)
from airflow.providers.amazon.aws.utils.waiter_with_logging import wait
from airflow.providers.cncf.kubernetes.utils.pod_manager import OnFinishAction
try:
from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator
except ImportError:
# preserve backward compatibility for older versions of cncf.kubernetes provider
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
CHECK_INTERVAL_SECONDS = 15
TIMEOUT_SECONDS = 25 * 60
DEFAULT_COMPUTE_TYPE = "nodegroup"
DEFAULT_CONN_ID = "aws_default"
DEFAULT_FARGATE_PROFILE_NAME = "profile"
DEFAULT_NAMESPACE_NAME = "default"
DEFAULT_NODEGROUP_NAME = "nodegroup"
CAN_NOT_DELETE_MSG = "A cluster cannot be deleted with attached {compute}. Deleting {count} {compute}."
MISSING_ARN_MSG = "Creating an {compute} requires {requirement} to be passed in."
SUCCESS_MSG = "No {compute} remain, deleting cluster."
SUPPORTED_COMPUTE_VALUES = frozenset({"nodegroup", "fargate"})
NODEGROUP_FULL_NAME = "Amazon EKS managed node groups"
FARGATE_FULL_NAME = "AWS Fargate profiles"
def _create_compute(
compute: str | None,
cluster_name: str,
aws_conn_id: str,
region: str | None,
waiter_delay: int,
waiter_max_attempts: int,
wait_for_completion: bool = False,
nodegroup_name: str | None = None,
nodegroup_role_arn: str | None = None,
create_nodegroup_kwargs: dict | None = None,
fargate_profile_name: str | None = None,
fargate_pod_execution_role_arn: str | None = None,
fargate_selectors: list | None = None,
create_fargate_profile_kwargs: dict | None = None,
subnets: list[str] | None = None,
):
log = logging.getLogger(__name__)
eks_hook = EksHook(aws_conn_id=aws_conn_id, region_name=region)
if compute == "nodegroup" and nodegroup_name:
# this is to satisfy mypy
subnets = subnets or []
create_nodegroup_kwargs = create_nodegroup_kwargs or {}
eks_hook.create_nodegroup(
clusterName=cluster_name,
nodegroupName=nodegroup_name,
subnets=subnets,
nodeRole=nodegroup_role_arn,
**create_nodegroup_kwargs,
)
if wait_for_completion:
log.info("Waiting for nodegroup to provision. This will take some time.")
wait(
waiter=eks_hook.conn.get_waiter("nodegroup_active"),
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
args={"clusterName": cluster_name, "nodegroupName": nodegroup_name},
failure_message="Nodegroup creation failed",
status_message="Nodegroup status is",
status_args=["nodegroup.status"],
)
elif compute == "fargate" and fargate_profile_name:
# this is to satisfy mypy
create_fargate_profile_kwargs = create_fargate_profile_kwargs or {}
fargate_selectors = fargate_selectors or []
eks_hook.create_fargate_profile(
clusterName=cluster_name,
fargateProfileName=fargate_profile_name,
podExecutionRoleArn=fargate_pod_execution_role_arn,
selectors=fargate_selectors,
**create_fargate_profile_kwargs,
)
if wait_for_completion:
log.info("Waiting for Fargate profile to provision. This will take some time.")
wait(
waiter=eks_hook.conn.get_waiter("fargate_profile_active"),
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
args={"clusterName": cluster_name, "fargateProfileName": fargate_profile_name},
failure_message="Fargate profile creation failed",
status_message="Fargate profile status is",
status_args=["fargateProfile.status"],
)
class EksCreateClusterOperator(BaseOperator):
"""
Creates an Amazon EKS Cluster control plane.
Optionally, can also create the supporting compute architecture:
- If argument 'compute' is provided with a value of 'nodegroup', will also
attempt to create an Amazon EKS Managed Nodegroup for the cluster.
See :class:`~airflow.providers.amazon.aws.operators.EksCreateNodegroupOperator`
documentation for requirements.
- If argument 'compute' is provided with a value of 'fargate', will also attempt to create an AWS
Fargate profile for the cluster.
See :class:`~airflow.providers.amazon.aws.operators.EksCreateFargateProfileOperator`
documentation for requirements.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksCreateClusterOperator`
:param cluster_name: The unique name to give to your Amazon EKS Cluster. (templated)
:param cluster_role_arn: The Amazon Resource Name (ARN) of the IAM role that provides permissions for the
Kubernetes control plane to make calls to AWS API operations on your behalf. (templated)
:param resources_vpc_config: The VPC configuration used by the cluster control plane. (templated)
:param compute: The type of compute architecture to generate along with the cluster. (templated)
Defaults to 'nodegroup' to generate an EKS Managed Nodegroup.
:param create_cluster_kwargs: Optional parameters to pass to the CreateCluster API (templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
If compute is assigned the value of 'nodegroup':
:param nodegroup_name: *REQUIRED* The unique name to give your Amazon EKS managed node group. (templated)
:param nodegroup_role_arn: *REQUIRED* The Amazon Resource Name (ARN) of the IAM role to associate with
the Amazon EKS managed node group. (templated)
:param create_nodegroup_kwargs: Optional parameters to pass to the CreateNodegroup API (templated)
If compute is assigned the value of 'fargate':
:param fargate_profile_name: *REQUIRED* The unique name to give your AWS Fargate profile. (templated)
:param fargate_pod_execution_role_arn: *REQUIRED* The Amazon Resource Name (ARN) of the pod execution
role to use for pods that match the selectors in the AWS Fargate profile. (templated)
:param fargate_selectors: The selectors to match for pods to use this AWS Fargate profile. (templated)
:param create_fargate_profile_kwargs: Optional parameters to pass to the CreateFargateProfile API
(templated)
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check cluster state
:param waiter_max_attempts: The maximum number of attempts to check cluster state
:param deferrable: If True, the operator will wait asynchronously for the job to complete.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
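    A minimal usage sketch creating a cluster plus a managed nodegroup; every ARN and subnet
    id below is a placeholder:
    .. code-block:: python
        create_cluster = EksCreateClusterOperator(
            task_id="create_cluster",
            cluster_name="example-cluster",
            cluster_role_arn="arn:aws:iam::123456789012:role/example-eks-cluster-role",
            resources_vpc_config={"subnetIds": ["subnet-12345", "subnet-67890"]},
            compute="nodegroup",
            nodegroup_name="example-nodegroup",
            nodegroup_role_arn="arn:aws:iam::123456789012:role/example-node-role",
        )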
"""
template_fields: Sequence[str] = (
"cluster_name",
"cluster_role_arn",
"resources_vpc_config",
"create_cluster_kwargs",
"compute",
"nodegroup_name",
"nodegroup_role_arn",
"create_nodegroup_kwargs",
"fargate_profile_name",
"fargate_pod_execution_role_arn",
"fargate_selectors",
"create_fargate_profile_kwargs",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
cluster_role_arn: str,
resources_vpc_config: dict,
compute: str | None = DEFAULT_COMPUTE_TYPE,
create_cluster_kwargs: dict | None = None,
nodegroup_name: str = DEFAULT_NODEGROUP_NAME,
nodegroup_role_arn: str | None = None,
create_nodegroup_kwargs: dict | None = None,
fargate_profile_name: str = DEFAULT_FARGATE_PROFILE_NAME,
fargate_pod_execution_role_arn: str | None = None,
fargate_selectors: list | None = None,
create_fargate_profile_kwargs: dict | None = None,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
**kwargs,
) -> None:
self.compute = compute
self.cluster_name = cluster_name
self.cluster_role_arn = cluster_role_arn
self.resources_vpc_config = resources_vpc_config
self.create_cluster_kwargs = create_cluster_kwargs or {}
self.nodegroup_role_arn = nodegroup_role_arn
self.fargate_pod_execution_role_arn = fargate_pod_execution_role_arn
self.create_fargate_profile_kwargs = create_fargate_profile_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region = region
self.nodegroup_name = nodegroup_name
self.create_nodegroup_kwargs = create_nodegroup_kwargs or {}
self.fargate_selectors = fargate_selectors or [{"namespace": DEFAULT_NAMESPACE_NAME}]
self.fargate_profile_name = fargate_profile_name
self.deferrable = deferrable
super().__init__(
**kwargs,
)
def execute(self, context: Context):
if self.compute:
if self.compute not in SUPPORTED_COMPUTE_VALUES:
raise ValueError("Provided compute type is not supported.")
elif (self.compute == "nodegroup") and not self.nodegroup_role_arn:
raise ValueError(
MISSING_ARN_MSG.format(compute=NODEGROUP_FULL_NAME, requirement="nodegroup_role_arn")
)
elif (self.compute == "fargate") and not self.fargate_pod_execution_role_arn:
raise ValueError(
MISSING_ARN_MSG.format(
compute=FARGATE_FULL_NAME, requirement="fargate_pod_execution_role_arn"
)
)
self.eks_hook = EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
self.eks_hook.create_cluster(
name=self.cluster_name,
roleArn=self.cluster_role_arn,
resourcesVpcConfig=self.resources_vpc_config,
**self.create_cluster_kwargs,
)
# Short circuit early if we don't need to wait to attach compute
# and the caller hasn't requested to wait for the cluster either.
if not any([self.compute, self.wait_for_completion, self.deferrable]):
return None
self.log.info("Waiting for EKS Cluster to provision. This will take some time.")
client = self.eks_hook.conn
if self.deferrable:
self.defer(
trigger=EksCreateClusterTrigger(
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="deferrable_create_cluster_next",
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
)
try:
client.get_waiter("cluster_active").wait(
name=self.cluster_name,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
except (ClientError, WaiterError) as e:
self.log.error("Cluster failed to start and will be torn down.\n %s", e)
self.eks_hook.delete_cluster(name=self.cluster_name)
client.get_waiter("cluster_deleted").wait(
name=self.cluster_name,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
raise
_create_compute(
compute=self.compute,
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region=self.region,
wait_for_completion=self.wait_for_completion,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
nodegroup_name=self.nodegroup_name,
nodegroup_role_arn=self.nodegroup_role_arn,
create_nodegroup_kwargs=self.create_nodegroup_kwargs,
fargate_profile_name=self.fargate_profile_name,
fargate_pod_execution_role_arn=self.fargate_pod_execution_role_arn,
fargate_selectors=self.fargate_selectors,
create_fargate_profile_kwargs=self.create_fargate_profile_kwargs,
subnets=cast(List[str], self.resources_vpc_config.get("subnetIds")),
)
def deferrable_create_cluster_next(self, context: Context, event: dict[str, Any] | None = None) -> None:
if event is None:
self.log.error("Trigger error: event is None")
raise AirflowException("Trigger error: event is None")
elif event["status"] == "failed":
self.log.error("Cluster failed to start and will be torn down.")
self.eks_hook.delete_cluster(name=self.cluster_name)
self.defer(
trigger=EksDeleteClusterTrigger(
cluster_name=self.cluster_name,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
force_delete_compute=False,
),
method_name="execute_failed",
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
)
elif event["status"] == "success":
self.log.info("Cluster is ready to provision compute.")
_create_compute(
compute=self.compute,
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region=self.region,
wait_for_completion=self.wait_for_completion,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
nodegroup_name=self.nodegroup_name,
nodegroup_role_arn=self.nodegroup_role_arn,
create_nodegroup_kwargs=self.create_nodegroup_kwargs,
fargate_profile_name=self.fargate_profile_name,
fargate_pod_execution_role_arn=self.fargate_pod_execution_role_arn,
fargate_selectors=self.fargate_selectors,
create_fargate_profile_kwargs=self.create_fargate_profile_kwargs,
subnets=cast(List[str], self.resources_vpc_config.get("subnetIds")),
)
if self.compute == "fargate":
self.defer(
trigger=EksCreateFargateProfileTrigger(
cluster_name=self.cluster_name,
fargate_profile_name=self.fargate_profile_name,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region=self.region,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
)
else:
self.defer(
trigger=EksCreateNodegroupTrigger(
nodegroup_name=self.nodegroup_name,
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
)
def execute_failed(self, context: Context, event: dict[str, Any] | None = None) -> None:
if event is None:
self.log.info("Trigger error: event is None")
raise AirflowException("Trigger error: event is None")
elif event["status"] == "delteted":
self.log.info("Cluster deleted")
raise event["exception"]
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
resource = "fargate profile" if self.compute == "fargate" else self.compute
if event is None:
self.log.info("Trigger error: event is None")
raise AirflowException("Trigger error: event is None")
elif event["status"] != "success":
raise AirflowException(f"Error creating {resource}: {event}")
self.log.info("%s created successfully", resource)
class EksCreateNodegroupOperator(BaseOperator):
"""
Creates an Amazon EKS managed node group for an existing Amazon EKS Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksCreateNodegroupOperator`
:param cluster_name: The name of the Amazon EKS Cluster to create the managed nodegroup in. (templated)
:param nodegroup_name: The unique name to give your managed nodegroup. (templated)
:param nodegroup_subnets:
The subnets to use for the Auto Scaling group that is created for the managed nodegroup. (templated)
:param nodegroup_role_arn:
The Amazon Resource Name (ARN) of the IAM role to associate with the managed nodegroup. (templated)
    :param create_nodegroup_kwargs: Optional parameters to pass to the CreateNodegroup API (templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check nodegroup state
:param waiter_max_attempts: The maximum number of attempts to check nodegroup state
:param deferrable: If True, the operator will wait asynchronously for the nodegroup to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
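    A minimal usage sketch; the ARN and subnet ids below are placeholders:
    .. code-block:: python
        create_nodegroup = EksCreateNodegroupOperator(
            task_id="create_nodegroup",
            cluster_name="example-cluster",
            nodegroup_name="example-nodegroup",
            nodegroup_subnets=["subnet-12345", "subnet-67890"],
            nodegroup_role_arn="arn:aws:iam::123456789012:role/example-node-role",
        )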
"""
template_fields: Sequence[str] = (
"cluster_name",
"nodegroup_subnets",
"nodegroup_role_arn",
"nodegroup_name",
"create_nodegroup_kwargs",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
nodegroup_subnets: list[str] | str,
nodegroup_role_arn: str,
nodegroup_name: str = DEFAULT_NODEGROUP_NAME,
create_nodegroup_kwargs: dict | None = None,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 80,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
self.nodegroup_subnets = nodegroup_subnets
self.compute = "nodegroup"
self.cluster_name = cluster_name
self.nodegroup_role_arn = nodegroup_role_arn
self.nodegroup_name = nodegroup_name
self.create_nodegroup_kwargs = create_nodegroup_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.aws_conn_id = aws_conn_id
self.region = region
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
super().__init__(**kwargs)
def execute(self, context: Context):
self.log.info(self.task_id)
if isinstance(self.nodegroup_subnets, str):
nodegroup_subnets_list: list[str] = []
if self.nodegroup_subnets != "":
try:
nodegroup_subnets_list = cast(list, literal_eval(self.nodegroup_subnets))
except ValueError:
self.log.warning(
"The nodegroup_subnets should be List or string representing "
"Python list and is %s. Defaulting to []",
self.nodegroup_subnets,
)
self.nodegroup_subnets = nodegroup_subnets_list
_create_compute(
compute=self.compute,
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region=self.region,
wait_for_completion=self.wait_for_completion,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
nodegroup_name=self.nodegroup_name,
nodegroup_role_arn=self.nodegroup_role_arn,
create_nodegroup_kwargs=self.create_nodegroup_kwargs,
subnets=self.nodegroup_subnets,
)
if self.deferrable:
self.defer(
trigger=EksCreateNodegroupTrigger(
cluster_name=self.cluster_name,
nodegroup_name=self.nodegroup_name,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error creating nodegroup: {event}")
return
class EksCreateFargateProfileOperator(BaseOperator):
"""
Creates an AWS Fargate profile for an Amazon EKS cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksCreateFargateProfileOperator`
:param cluster_name: The name of the Amazon EKS cluster to apply the AWS Fargate profile to. (templated)
:param pod_execution_role_arn: The Amazon Resource Name (ARN) of the pod execution role to
use for pods that match the selectors in the AWS Fargate profile. (templated)
:param selectors: The selectors to match for pods to use this AWS Fargate profile. (templated)
:param fargate_profile_name: The unique name to give your AWS Fargate profile. (templated)
    :param create_fargate_profile_kwargs: Optional parameters to pass to the CreateFargateProfile API
(templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check profile status
:param waiter_max_attempts: The maximum number of attempts to check the status of the profile.
:param deferrable: If True, the operator will wait asynchronously for the profile to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
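    A minimal usage sketch; the ARN and namespace below are placeholders:
    .. code-block:: python
        create_fargate_profile = EksCreateFargateProfileOperator(
            task_id="create_fargate_profile",
            cluster_name="example-cluster",
            fargate_profile_name="example-profile",
            pod_execution_role_arn="arn:aws:iam::123456789012:role/example-pod-role",
            selectors=[{"namespace": "default"}],
        )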
"""
template_fields: Sequence[str] = (
"cluster_name",
"pod_execution_role_arn",
"selectors",
"fargate_profile_name",
"create_fargate_profile_kwargs",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
pod_execution_role_arn: str,
selectors: list,
fargate_profile_name: str = DEFAULT_FARGATE_PROFILE_NAME,
create_fargate_profile_kwargs: dict | None = None,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 10,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
self.cluster_name = cluster_name
self.selectors = selectors
self.pod_execution_role_arn = pod_execution_role_arn
self.fargate_profile_name = fargate_profile_name
self.create_fargate_profile_kwargs = create_fargate_profile_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.aws_conn_id = aws_conn_id
self.region = region
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
self.compute = "fargate"
super().__init__(
**kwargs,
)
def execute(self, context: Context):
_create_compute(
compute=self.compute,
cluster_name=self.cluster_name,
aws_conn_id=self.aws_conn_id,
region=self.region,
wait_for_completion=self.wait_for_completion,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
fargate_profile_name=self.fargate_profile_name,
fargate_pod_execution_role_arn=self.pod_execution_role_arn,
fargate_selectors=self.selectors,
create_fargate_profile_kwargs=self.create_fargate_profile_kwargs,
)
if self.deferrable:
self.defer(
trigger=EksCreateFargateProfileTrigger(
cluster_name=self.cluster_name,
fargate_profile_name=self.fargate_profile_name,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
region=self.region,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=(self.waiter_max_attempts * self.waiter_delay + 60)),
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error creating Fargate profile: {event}")
else:
self.log.info("Fargate profile created successfully")
return
class EksDeleteClusterOperator(BaseOperator):
"""
Deletes the Amazon EKS Cluster control plane and all nodegroups attached to it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksDeleteClusterOperator`
:param cluster_name: The name of the Amazon EKS Cluster to delete. (templated)
:param force_delete_compute: If True, will delete any attached resources. (templated)
Defaults to False.
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check cluster state
:param waiter_max_attempts: The maximum number of attempts to check cluster state
:param deferrable: If True, the operator will wait asynchronously for the cluster to be deleted.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
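    A minimal usage sketch; the cluster name below is a placeholder:
    .. code-block:: python
        delete_cluster = EksDeleteClusterOperator(
            task_id="delete_cluster",
            cluster_name="example-cluster",
            force_delete_compute=True,
        )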
"""
template_fields: Sequence[str] = (
"cluster_name",
"force_delete_compute",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
force_delete_compute: bool = False,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
**kwargs,
) -> None:
self.cluster_name = cluster_name
self.force_delete_compute = force_delete_compute
self.wait_for_completion = False if deferrable else wait_for_completion
self.aws_conn_id = aws_conn_id
self.region = region
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
super().__init__(**kwargs)
def execute(self, context: Context):
eks_hook = EksHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
if self.deferrable:
self.defer(
trigger=EksDeleteClusterTrigger(
cluster_name=self.cluster_name,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
force_delete_compute=self.force_delete_compute,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_delay * self.waiter_max_attempts),
)
elif self.force_delete_compute:
self.delete_any_nodegroups(eks_hook)
self.delete_any_fargate_profiles(eks_hook)
eks_hook.delete_cluster(name=self.cluster_name)
if self.wait_for_completion:
self.log.info("Waiting for cluster to delete. This will take some time.")
eks_hook.conn.get_waiter("cluster_deleted").wait(name=self.cluster_name)
def delete_any_nodegroups(self, eks_hook) -> None:
"""
Deletes all Amazon EKS managed node groups for a provided Amazon EKS Cluster.
Amazon EKS managed node groups can be deleted in parallel, so we can send all
delete commands in bulk and move on once the count of nodegroups is zero.
"""
nodegroups = eks_hook.list_nodegroups(clusterName=self.cluster_name)
if nodegroups:
self.log.info(CAN_NOT_DELETE_MSG.format(compute=NODEGROUP_FULL_NAME, count=len(nodegroups)))
for group in nodegroups:
eks_hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=group)
# Note this is a custom waiter so we're using hook.get_waiter(), not hook.conn.get_waiter().
self.log.info("Waiting for all nodegroups to delete. This will take some time.")
eks_hook.get_waiter("all_nodegroups_deleted").wait(clusterName=self.cluster_name)
self.log.info(SUCCESS_MSG.format(compute=NODEGROUP_FULL_NAME))
def delete_any_fargate_profiles(self, eks_hook) -> None:
"""
Deletes all EKS Fargate profiles for a provided Amazon EKS Cluster.
EKS Fargate profiles must be deleted one at a time, so we must wait
for one to be deleted before sending the next delete command.
"""
fargate_profiles = eks_hook.list_fargate_profiles(clusterName=self.cluster_name)
if fargate_profiles:
self.log.info(CAN_NOT_DELETE_MSG.format(compute=FARGATE_FULL_NAME, count=len(fargate_profiles)))
self.log.info("Waiting for Fargate profiles to delete. This will take some time.")
for profile in fargate_profiles:
# The API will return a (cluster) ResourceInUseException if you try
# to delete Fargate profiles in parallel the way we can with nodegroups,
# so each must be deleted sequentially
eks_hook.delete_fargate_profile(clusterName=self.cluster_name, fargateProfileName=profile)
eks_hook.conn.get_waiter("fargate_profile_deleted").wait(
clusterName=self.cluster_name, fargateProfileName=profile
)
self.log.info(SUCCESS_MSG.format(compute=FARGATE_FULL_NAME))
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
if event is None:
self.log.error("Trigger error. Event is None")
raise AirflowException("Trigger error. Event is None")
elif event["status"] == "success":
self.log.info("Cluster deleted successfully.")
class EksDeleteNodegroupOperator(BaseOperator):
"""
Deletes an Amazon EKS managed node group from an Amazon EKS Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksDeleteNodegroupOperator`
:param cluster_name: The name of the Amazon EKS Cluster associated with your nodegroup. (templated)
:param nodegroup_name: The name of the nodegroup to delete. (templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check nodegroup state
:param waiter_max_attempts: The maximum number of attempts to check nodegroup state
:param deferrable: If True, the operator will wait asynchronously for the nodegroup to be deleted.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
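    A minimal usage sketch; the names below are placeholders:
    .. code-block:: python
        delete_nodegroup = EksDeleteNodegroupOperator(
            task_id="delete_nodegroup",
            cluster_name="example-cluster",
            nodegroup_name="example-nodegroup",
        )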
"""
template_fields: Sequence[str] = (
"cluster_name",
"nodegroup_name",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
nodegroup_name: str,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
self.cluster_name = cluster_name
self.nodegroup_name = nodegroup_name
self.wait_for_completion = wait_for_completion
self.aws_conn_id = aws_conn_id
self.region = region
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
super().__init__(**kwargs)
def execute(self, context: Context):
eks_hook = EksHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
eks_hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=self.nodegroup_name)
if self.deferrable:
self.defer(
trigger=EksDeleteNodegroupTrigger(
cluster_name=self.cluster_name,
nodegroup_name=self.nodegroup_name,
aws_conn_id=self.aws_conn_id,
region_name=self.region,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
elif self.wait_for_completion:
self.log.info("Waiting for nodegroup to delete. This will take some time.")
eks_hook.conn.get_waiter("nodegroup_deleted").wait(
clusterName=self.cluster_name, nodegroupName=self.nodegroup_name
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error deleting nodegroup: {event}")
return
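# Illustrative usage (editorial sketch, not part of the provider module): a minimal DAG that
# deletes a managed nodegroup in deferrable mode. The cluster and nodegroup names below are
# placeholders, not values taken from this module.
def _example_eks_delete_nodegroup_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_eks_delete_nodegroup",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        # Deferrable mode frees the worker slot while waiting; it requires the aiobotocore module.
        EksDeleteNodegroupOperator(
            task_id="delete_nodegroup",
            cluster_name="my-eks-cluster",
            nodegroup_name="my-nodegroup",
            deferrable=True,
            waiter_delay=30,
            waiter_max_attempts=40,
        )
    return dag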
class EksDeleteFargateProfileOperator(BaseOperator):
"""
Deletes an AWS Fargate profile from an Amazon EKS Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksDeleteFargateProfileOperator`
:param cluster_name: The name of the Amazon EKS cluster associated with your Fargate profile. (templated)
:param fargate_profile_name: The name of the AWS Fargate profile to delete. (templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check profile status
:param waiter_max_attempts: The maximum number of attempts to check the status of the profile.
:param deferrable: If True, the operator will wait asynchronously for the profile to be deleted.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields: Sequence[str] = (
"cluster_name",
"fargate_profile_name",
"wait_for_completion",
"aws_conn_id",
"region",
)
def __init__(
self,
cluster_name: str,
fargate_profile_name: str,
wait_for_completion: bool = False,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.fargate_profile_name = fargate_profile_name
self.wait_for_completion = wait_for_completion
self.aws_conn_id = aws_conn_id
self.region = region
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context):
eks_hook = EksHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
eks_hook.delete_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
)
if self.deferrable:
self.defer(
trigger=EksDeleteFargateProfileTrigger(
cluster_name=self.cluster_name,
fargate_profile_name=self.fargate_profile_name,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
region=self.region,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=(self.waiter_max_attempts * self.waiter_delay + 60)),
)
elif self.wait_for_completion:
self.log.info("Waiting for Fargate profile to delete. This will take some time.")
eks_hook.conn.get_waiter("fargate_profile_deleted").wait(
clusterName=self.cluster_name,
fargateProfileName=self.fargate_profile_name,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
def execute_complete(self, context, event=None):
if event["status"] != "success":
raise AirflowException(f"Error deleting Fargate profile: {event}")
else:
self.log.info("Fargate profile deleted successfully")
return
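# Illustrative usage (editorial sketch, not part of the provider module): deleting a single
# Fargate profile and waiting synchronously for the deletion to finish. The cluster and profile
# names are placeholders.
def _example_eks_delete_fargate_profile_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_eks_delete_fargate_profile",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        EksDeleteFargateProfileOperator(
            task_id="delete_fargate_profile",
            cluster_name="my-eks-cluster",
            fargate_profile_name="my-fargate-profile",
            wait_for_completion=True,
        )
    return dag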
class EksPodOperator(KubernetesPodOperator):
"""
Executes a task in a Kubernetes pod on the specified Amazon EKS Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksPodOperator`
:param cluster_name: The name of the Amazon EKS Cluster to execute the task on. (templated)
:param cluster_role_arn: The Amazon Resource Name (ARN) of the IAM role that provides permissions
for the Kubernetes control plane to make calls to AWS API operations on your behalf. (templated)
:param in_cluster: If True, look for config inside the cluster; if False look for a local file path.
:param namespace: The namespace in which to execute the pod. (templated)
:param pod_name: The unique name to give the pod. (templated)
:param aws_profile: The named profile containing the credentials for the AWS CLI tool to use.
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
If "delete_pod", the pod will be deleted regardless it's state; if "delete_succeeded_pod",
only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
Current default is `keep_pod`, but this will be changed in the next major release of this provider.
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted. If True, delete the
pod; if False, leave the pod. Current default is False, but this will be
changed in the next major release of this provider.
Deprecated - use `on_finish_action` instead.
"""
template_fields: Sequence[str] = tuple(
{
"cluster_name",
"in_cluster",
"namespace",
"pod_name",
"aws_conn_id",
"region",
}
| set(KubernetesPodOperator.template_fields)
)
def __init__(
self,
cluster_name: str,
# Setting in_cluster to False tells the pod that the config
# file is stored locally in the worker and not in the cluster.
in_cluster: bool = False,
namespace: str = DEFAULT_NAMESPACE_NAME,
pod_context: str | None = None,
pod_name: str | None = None,
pod_username: str | None = None,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
on_finish_action: str | None = None,
is_delete_operator_pod: bool | None = None,
**kwargs,
) -> None:
if is_delete_operator_pod is not None:
warnings.warn(
"`is_delete_operator_pod` parameter is deprecated, please use `on_finish_action`",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["on_finish_action"] = (
OnFinishAction.DELETE_POD if is_delete_operator_pod else OnFinishAction.KEEP_POD
)
else:
if on_finish_action is not None:
kwargs["on_finish_action"] = OnFinishAction(on_finish_action)
else:
warnings.warn(
f"You have not set parameter `on_finish_action` in class {self.__class__.__name__}. "
"Currently the default for this parameter is `keep_pod` but in a future release"
" the default will be changed to `delete_pod`. To ensure pods are not deleted in"
" the future you will need to set `on_finish_action=keep_pod` explicitly.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["on_finish_action"] = OnFinishAction.KEEP_POD
self.cluster_name = cluster_name
self.in_cluster = in_cluster
self.namespace = namespace
self.pod_name = pod_name
self.aws_conn_id = aws_conn_id
self.region = region
super().__init__(
in_cluster=self.in_cluster,
namespace=self.namespace,
name=self.pod_name,
**kwargs,
)
# There is no need to manage the kube_config file, as it will be generated automatically.
# All Kubernetes parameters (except config_file) are also valid for the EksPodOperator.
if self.config_file:
raise AirflowException("The config_file is not an allowed parameter for the EksPodOperator.")
def execute(self, context: Context):
eks_hook = EksHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
with eks_hook.generate_config_file(
eks_cluster_name=self.cluster_name, pod_namespace=self.namespace
) as self.config_file:
return super().execute(context)
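# Illustrative usage (editorial sketch, not part of the provider module): running a one-off
# command in a pod on an existing EKS cluster. The cluster name, image and command are
# placeholders; any other KubernetesPodOperator argument (except config_file) can be passed too.
def _example_eks_pod_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_eks_pod",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        EksPodOperator(
            task_id="run_pod",
            cluster_name="my-eks-cluster",
            pod_name="example-pod",
            image="amazon/aws-cli:latest",
            cmds=["sh", "-c", "echo Hello from EKS"],
            # Set on_finish_action explicitly to avoid the deprecation warning about its
            # changing default.
            on_finish_action="delete_pod",
        )
    return dag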
| 50,133 | 45.506494 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/rds.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import warnings
from datetime import timedelta
from typing import TYPE_CHECKING, Sequence
from mypy_boto3_rds.type_defs import TagTypeDef
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.rds import RdsHook
from airflow.providers.amazon.aws.triggers.rds import RdsDbInstanceTrigger
from airflow.providers.amazon.aws.utils.rds import RdsDbType
from airflow.providers.amazon.aws.utils.tags import format_tags
from airflow.providers.amazon.aws.utils.waiter_with_logging import wait
if TYPE_CHECKING:
from airflow.utils.context import Context
class RdsBaseOperator(BaseOperator):
"""Base operator that implements common functions for all operators."""
ui_color = "#eeaa88"
ui_fgcolor = "#ffffff"
def __init__(
self,
*args,
aws_conn_id: str = "aws_conn_id",
region_name: str | None = None,
hook_params: dict | None = None,
**kwargs,
):
if hook_params is not None:
warnings.warn(
"The parameter hook_params is deprecated and will be removed. "
"Note that it is also incompatible with deferrable mode. "
"You can use the region_name parameter to specify the region. "
"If you were using hook_params for other purposes, please get in touch either on "
"airflow slack, or by opening a github issue on the project. "
"You can mention https://github.com/apache/airflow/pull/32352",
AirflowProviderDeprecationWarning,
stacklevel=3, # 2 is in the operator's init, 3 is in the user code creating the operator
)
hook_params = hook_params or {}
self.region_name = region_name or hook_params.pop("region_name", None)
self.hook = RdsHook(aws_conn_id=aws_conn_id, region_name=self.region_name, **(hook_params))
super().__init__(*args, **kwargs)
self._await_interval = 60 # seconds
def execute(self, context: Context) -> str:
"""Different implementations for snapshots, tasks and events."""
raise NotImplementedError
def on_kill(self) -> None:
"""Different implementations for snapshots, tasks and events."""
raise NotImplementedError
class RdsCreateDbSnapshotOperator(RdsBaseOperator):
"""
Creates a snapshot of a DB instance or DB cluster.
The source DB instance or cluster must be in the available or storage-optimization state.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCreateDbSnapshotOperator`
:param db_type: Type of the DB - either "instance" or "cluster"
:param db_identifier: The identifier of the instance or cluster that you want to create the snapshot of
:param db_snapshot_identifier: The identifier for the DB snapshot
:param tags: A dictionary of tags or a list of tags in format `[{"Key": "...", "Value": "..."},]`
`USER Tagging <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html>`__
:param wait_for_completion: If True, waits for creation of the DB snapshot to complete. (default: True)
"""
template_fields = ("db_snapshot_identifier", "db_identifier", "tags")
def __init__(
self,
*,
db_type: str,
db_identifier: str,
db_snapshot_identifier: str,
tags: Sequence[TagTypeDef] | dict | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_type = RdsDbType(db_type)
self.db_identifier = db_identifier
self.db_snapshot_identifier = db_snapshot_identifier
self.tags = tags
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.log.info(
"Starting to create snapshot of RDS %s '%s': %s",
self.db_type,
self.db_identifier,
self.db_snapshot_identifier,
)
formatted_tags = format_tags(self.tags)
if self.db_type.value == "instance":
create_instance_snap = self.hook.conn.create_db_snapshot(
DBInstanceIdentifier=self.db_identifier,
DBSnapshotIdentifier=self.db_snapshot_identifier,
Tags=formatted_tags,
)
create_response = json.dumps(create_instance_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_snapshot_state(self.db_snapshot_identifier, target_state="available")
else:
create_cluster_snap = self.hook.conn.create_db_cluster_snapshot(
DBClusterIdentifier=self.db_identifier,
DBClusterSnapshotIdentifier=self.db_snapshot_identifier,
Tags=formatted_tags,
)
create_response = json.dumps(create_cluster_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_cluster_snapshot_state(
self.db_snapshot_identifier, target_state="available"
)
return create_response
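# Illustrative usage (editorial sketch, not part of the provider module): taking a snapshot of an
# existing DB instance and blocking until it becomes available. The identifiers and tags are
# placeholders.
def _example_rds_create_db_snapshot_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_create_db_snapshot",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        RdsCreateDbSnapshotOperator(
            task_id="create_snapshot",
            db_type="instance",
            db_identifier="my-db-instance",
            db_snapshot_identifier="my-db-instance-snapshot",
            tags={"team": "data-platform"},
            wait_for_completion=True,
        )
    return dag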
class RdsCopyDbSnapshotOperator(RdsBaseOperator):
"""
Copies the specified DB instance or DB cluster snapshot.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCopyDbSnapshotOperator`
:param db_type: Type of the DB - either "instance" or "cluster"
:param source_db_snapshot_identifier: The identifier of the source snapshot
:param target_db_snapshot_identifier: The identifier of the target snapshot
:param kms_key_id: The AWS KMS key identifier for an encrypted DB snapshot
:param tags: A dictionary of tags or a list of tags in format `[{"Key": "...", "Value": "..."},]`
`USER Tagging <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html>`__
:param copy_tags: Whether to copy all tags from the source snapshot to the target snapshot (default False)
:param pre_signed_url: The URL that contains a Signature Version 4 signed request
:param option_group_name: The name of an option group to associate with the copy of the snapshot
Only when db_type='instance'
:param target_custom_availability_zone: The external custom Availability Zone identifier for the target
Only when db_type='instance'
:param source_region: The ID of the region that contains the snapshot to be copied
:param wait_for_completion: If True, waits for snapshot copy to complete. (default: True)
"""
template_fields = (
"source_db_snapshot_identifier",
"target_db_snapshot_identifier",
"tags",
"pre_signed_url",
"option_group_name",
)
def __init__(
self,
*,
db_type: str,
source_db_snapshot_identifier: str,
target_db_snapshot_identifier: str,
kms_key_id: str = "",
tags: Sequence[TagTypeDef] | dict | None = None,
copy_tags: bool = False,
pre_signed_url: str = "",
option_group_name: str = "",
target_custom_availability_zone: str = "",
source_region: str = "",
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_type = RdsDbType(db_type)
self.source_db_snapshot_identifier = source_db_snapshot_identifier
self.target_db_snapshot_identifier = target_db_snapshot_identifier
self.kms_key_id = kms_key_id
self.tags = tags
self.copy_tags = copy_tags
self.pre_signed_url = pre_signed_url
self.option_group_name = option_group_name
self.target_custom_availability_zone = target_custom_availability_zone
self.source_region = source_region
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.log.info(
"Starting to copy snapshot '%s' as '%s'",
self.source_db_snapshot_identifier,
self.target_db_snapshot_identifier,
)
formatted_tags = format_tags(self.tags)
if self.db_type.value == "instance":
copy_instance_snap = self.hook.conn.copy_db_snapshot(
SourceDBSnapshotIdentifier=self.source_db_snapshot_identifier,
TargetDBSnapshotIdentifier=self.target_db_snapshot_identifier,
KmsKeyId=self.kms_key_id,
Tags=formatted_tags,
CopyTags=self.copy_tags,
PreSignedUrl=self.pre_signed_url,
OptionGroupName=self.option_group_name,
TargetCustomAvailabilityZone=self.target_custom_availability_zone,
SourceRegion=self.source_region,
)
copy_response = json.dumps(copy_instance_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_snapshot_state(
self.target_db_snapshot_identifier, target_state="available"
)
else:
copy_cluster_snap = self.hook.conn.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier=self.source_db_snapshot_identifier,
TargetDBClusterSnapshotIdentifier=self.target_db_snapshot_identifier,
KmsKeyId=self.kms_key_id,
Tags=formatted_tags,
CopyTags=self.copy_tags,
PreSignedUrl=self.pre_signed_url,
SourceRegion=self.source_region,
)
copy_response = json.dumps(copy_cluster_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_cluster_snapshot_state(
self.target_db_snapshot_identifier, target_state="available"
)
return copy_response
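# Illustrative usage (editorial sketch, not part of the provider module): copying an instance
# snapshot under a new identifier, carrying its tags along. Both snapshot identifiers are
# placeholders.
def _example_rds_copy_db_snapshot_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_copy_db_snapshot",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        RdsCopyDbSnapshotOperator(
            task_id="copy_snapshot",
            db_type="instance",
            source_db_snapshot_identifier="my-db-instance-snapshot",
            target_db_snapshot_identifier="my-db-instance-snapshot-copy",
            copy_tags=True,
        )
    return dag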
class RdsDeleteDbSnapshotOperator(RdsBaseOperator):
"""
    Deletes a DB instance or cluster snapshot, or terminates an in-progress copy operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsDeleteDbSnapshotOperator`
:param db_type: Type of the DB - either "instance" or "cluster"
    :param db_snapshot_identifier: The identifier for the DB instance or DB cluster snapshot
    :param wait_for_completion: If True, waits for deletion of the DB snapshot to complete. (default: True)
"""
template_fields = ("db_snapshot_identifier",)
def __init__(
self,
*,
db_type: str,
db_snapshot_identifier: str,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_type = RdsDbType(db_type)
self.db_snapshot_identifier = db_snapshot_identifier
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.log.info("Starting to delete snapshot '%s'", self.db_snapshot_identifier)
if self.db_type.value == "instance":
delete_instance_snap = self.hook.conn.delete_db_snapshot(
DBSnapshotIdentifier=self.db_snapshot_identifier,
)
delete_response = json.dumps(delete_instance_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_snapshot_state(self.db_snapshot_identifier, target_state="deleted")
else:
delete_cluster_snap = self.hook.conn.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier=self.db_snapshot_identifier,
)
delete_response = json.dumps(delete_cluster_snap, default=str)
if self.wait_for_completion:
self.hook.wait_for_db_cluster_snapshot_state(
self.db_snapshot_identifier, target_state="deleted"
)
return delete_response
class RdsStartExportTaskOperator(RdsBaseOperator):
"""
Starts an export of a snapshot to Amazon S3. The provided IAM role must have access to the S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsStartExportTaskOperator`
:param export_task_identifier: A unique identifier for the snapshot export task.
:param source_arn: The Amazon Resource Name (ARN) of the snapshot to export to Amazon S3.
:param s3_bucket_name: The name of the Amazon S3 bucket to export the snapshot to.
:param iam_role_arn: The name of the IAM role to use for writing to the Amazon S3 bucket.
:param kms_key_id: The ID of the Amazon Web Services KMS key to use to encrypt the snapshot.
:param s3_prefix: The Amazon S3 bucket prefix to use as the file name and path of the exported snapshot.
:param export_only: The data to be exported from the snapshot.
:param wait_for_completion: If True, waits for the DB snapshot export to complete. (default: True)
"""
template_fields = (
"export_task_identifier",
"source_arn",
"s3_bucket_name",
"iam_role_arn",
"kms_key_id",
"s3_prefix",
"export_only",
)
def __init__(
self,
*,
export_task_identifier: str,
source_arn: str,
s3_bucket_name: str,
iam_role_arn: str,
kms_key_id: str,
s3_prefix: str = "",
export_only: list[str] | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.export_task_identifier = export_task_identifier
self.source_arn = source_arn
self.s3_bucket_name = s3_bucket_name
self.iam_role_arn = iam_role_arn
self.kms_key_id = kms_key_id
self.s3_prefix = s3_prefix
self.export_only = export_only or []
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.log.info("Starting export task %s for snapshot %s", self.export_task_identifier, self.source_arn)
start_export = self.hook.conn.start_export_task(
ExportTaskIdentifier=self.export_task_identifier,
SourceArn=self.source_arn,
S3BucketName=self.s3_bucket_name,
IamRoleArn=self.iam_role_arn,
KmsKeyId=self.kms_key_id,
S3Prefix=self.s3_prefix,
ExportOnly=self.export_only,
)
if self.wait_for_completion:
self.hook.wait_for_export_task_state(self.export_task_identifier, target_state="complete")
return json.dumps(start_export, default=str)
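# Illustrative usage (editorial sketch, not part of the provider module): exporting a snapshot to
# Amazon S3. The snapshot ARN, bucket, IAM role and KMS key are placeholders and must already
# exist in the target account.
def _example_rds_start_export_task_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_start_export_task",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        RdsStartExportTaskOperator(
            task_id="start_export",
            export_task_identifier="my-snapshot-export",
            source_arn="arn:aws:rds:us-east-1:123456789012:snapshot:my-db-instance-snapshot",
            s3_bucket_name="my-export-bucket",
            iam_role_arn="arn:aws:iam::123456789012:role/my-export-role",
            kms_key_id="my-kms-key-id",
            s3_prefix="rds-exports/",
        )
    return dag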
class RdsCancelExportTaskOperator(RdsBaseOperator):
"""
Cancels an export task in progress that is exporting a snapshot to Amazon S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCancelExportTaskOperator`
:param export_task_identifier: The identifier of the snapshot export task to cancel
:param wait_for_completion: If True, waits for DB snapshot export to cancel. (default: True)
"""
template_fields = ("export_task_identifier",)
def __init__(
self,
*,
export_task_identifier: str,
wait_for_completion: bool = True,
check_interval: int = 30,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.export_task_identifier = export_task_identifier
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
def execute(self, context: Context) -> str:
self.log.info("Canceling export task %s", self.export_task_identifier)
cancel_export = self.hook.conn.cancel_export_task(
ExportTaskIdentifier=self.export_task_identifier,
)
if self.wait_for_completion:
self.hook.wait_for_export_task_state(
self.export_task_identifier, target_state="canceled", check_interval=self.check_interval
)
return json.dumps(cancel_export, default=str)
class RdsCreateEventSubscriptionOperator(RdsBaseOperator):
"""
Creates an RDS event notification subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCreateEventSubscriptionOperator`
:param subscription_name: The name of the subscription (must be less than 255 characters)
:param sns_topic_arn: The ARN of the SNS topic created for event notification
:param source_type: The type of source that is generating the events. Valid values: db-instance |
db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy
:param event_categories: A list of event categories for a source type that you want to subscribe to
`USER Events <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html>`__
:param source_ids: The list of identifiers of the event sources for which events are returned
    :param enabled: A value that indicates whether to activate the subscription (default True)
:param tags: A dictionary of tags or a list of tags in format `[{"Key": "...", "Value": "..."},]`
`USER Tagging <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html>`__
:param wait_for_completion: If True, waits for creation of the subscription to complete. (default: True)
"""
template_fields = (
"subscription_name",
"sns_topic_arn",
"source_type",
"event_categories",
"source_ids",
"tags",
)
def __init__(
self,
*,
subscription_name: str,
sns_topic_arn: str,
source_type: str = "",
event_categories: Sequence[str] | None = None,
source_ids: Sequence[str] | None = None,
enabled: bool = True,
tags: Sequence[TagTypeDef] | dict | None = None,
wait_for_completion: bool = True,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.subscription_name = subscription_name
self.sns_topic_arn = sns_topic_arn
self.source_type = source_type
self.event_categories = event_categories or []
self.source_ids = source_ids or []
self.enabled = enabled
self.tags = tags
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.log.info("Creating event subscription '%s' to '%s'", self.subscription_name, self.sns_topic_arn)
formatted_tags = format_tags(self.tags)
create_subscription = self.hook.conn.create_event_subscription(
SubscriptionName=self.subscription_name,
SnsTopicArn=self.sns_topic_arn,
SourceType=self.source_type,
EventCategories=self.event_categories,
SourceIds=self.source_ids,
Enabled=self.enabled,
Tags=formatted_tags,
)
if self.wait_for_completion:
self.hook.wait_for_event_subscription_state(self.subscription_name, target_state="active")
return json.dumps(create_subscription, default=str)
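# Illustrative usage (editorial sketch, not part of the provider module): subscribing an SNS topic
# to backup events of a single DB instance. The topic ARN and instance identifier are placeholders.
def _example_rds_create_event_subscription_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_create_event_subscription",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        RdsCreateEventSubscriptionOperator(
            task_id="create_subscription",
            subscription_name="my-db-backup-events",
            sns_topic_arn="arn:aws:sns:us-east-1:123456789012:my-topic",
            source_type="db-instance",
            event_categories=["backup"],
            source_ids=["my-db-instance"],
        )
    return dag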
class RdsDeleteEventSubscriptionOperator(RdsBaseOperator):
"""
Deletes an RDS event notification subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsDeleteEventSubscriptionOperator`
:param subscription_name: The name of the RDS event notification subscription you want to delete
"""
template_fields = ("subscription_name",)
def __init__(
self,
*,
subscription_name: str,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.subscription_name = subscription_name
def execute(self, context: Context) -> str:
self.log.info(
"Deleting event subscription %s",
self.subscription_name,
)
delete_subscription = self.hook.conn.delete_event_subscription(
SubscriptionName=self.subscription_name,
)
return json.dumps(delete_subscription, default=str)
class RdsCreateDbInstanceOperator(RdsBaseOperator):
"""
Creates an RDS DB instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCreateDbInstanceOperator`
:param db_instance_identifier: The DB instance identifier, must start with a letter and
contain from 1 to 63 letters, numbers, or hyphens
:param db_instance_class: The compute and memory capacity of the DB instance, for example db.m5.large
:param engine: The name of the database engine to be used for this instance
:param rds_kwargs: Named arguments to pass to boto3 RDS client function ``create_db_instance``
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_instance
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param wait_for_completion: If True, waits for creation of the DB instance to complete. (default: True)
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check DB instance state
:param waiter_max_attempts: The maximum number of attempts to check DB instance state
:param deferrable: If True, the operator will wait asynchronously for the DB instance to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields = ("db_instance_identifier", "db_instance_class", "engine", "rds_kwargs")
def __init__(
self,
*,
db_instance_identifier: str,
db_instance_class: str,
engine: str,
rds_kwargs: dict | None = None,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_instance_identifier = db_instance_identifier
self.db_instance_class = db_instance_class
self.engine = engine
self.rds_kwargs = rds_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
def execute(self, context: Context) -> str:
self.log.info("Creating new DB instance %s", self.db_instance_identifier)
create_db_instance = self.hook.conn.create_db_instance(
DBInstanceIdentifier=self.db_instance_identifier,
DBInstanceClass=self.db_instance_class,
Engine=self.engine,
**self.rds_kwargs,
)
if self.deferrable:
self.defer(
trigger=RdsDbInstanceTrigger(
db_instance_identifier=self.db_instance_identifier,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_name="db_instance_available",
# ignoring type because create_db_instance is a dict
response=create_db_instance, # type: ignore[arg-type]
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_delay * self.waiter_max_attempts),
)
if self.wait_for_completion:
waiter = self.hook.conn.get_waiter("db_instance_available")
wait(
waiter=waiter,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
args={"DBInstanceIdentifier": self.db_instance_identifier},
failure_message="DB instance creation failed",
status_message="DB Instance status is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
return json.dumps(create_db_instance, default=str)
def execute_complete(self, context, event=None) -> str:
if event["status"] != "success":
raise AirflowException(f"DB instance creation failed: {event}")
else:
return json.dumps(event["response"], default=str)
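# Illustrative usage (editorial sketch, not part of the provider module): creating a small
# PostgreSQL instance in deferrable mode. The entries in rds_kwargs are ordinary boto3
# create_db_instance parameters; all identifiers and credentials are placeholders.
def _example_rds_create_db_instance_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_create_db_instance",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        RdsCreateDbInstanceOperator(
            task_id="create_db_instance",
            db_instance_identifier="my-db-instance",
            db_instance_class="db.t3.micro",
            engine="postgres",
            rds_kwargs={
                "AllocatedStorage": 20,
                "MasterUsername": "rds_admin",
                "MasterUserPassword": "change-me",  # placeholder; use a secrets backend in practice
                "DBName": "mydb",
            },
            deferrable=True,
        )
    return dag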
class RdsDeleteDbInstanceOperator(RdsBaseOperator):
"""
Deletes an RDS DB Instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsDeleteDbInstanceOperator`
:param db_instance_identifier: The DB instance identifier for the DB instance to be deleted
:param rds_kwargs: Named arguments to pass to boto3 RDS client function ``delete_db_instance``
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.delete_db_instance
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param wait_for_completion: If True, waits for deletion of the DB instance to complete. (default: True)
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check DB instance state
:param waiter_max_attempts: The maximum number of attempts to check DB instance state
    :param deferrable: If True, the operator will wait asynchronously for the DB instance to be deleted.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
template_fields = ("db_instance_identifier", "rds_kwargs")
def __init__(
self,
*,
db_instance_identifier: str,
rds_kwargs: dict | None = None,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_instance_identifier = db_instance_identifier
self.rds_kwargs = rds_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
def execute(self, context: Context) -> str:
self.log.info("Deleting DB instance %s", self.db_instance_identifier)
delete_db_instance = self.hook.conn.delete_db_instance(
DBInstanceIdentifier=self.db_instance_identifier,
**self.rds_kwargs,
)
if self.deferrable:
self.defer(
trigger=RdsDbInstanceTrigger(
db_instance_identifier=self.db_instance_identifier,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_name="db_instance_deleted",
# ignoring type because delete_db_instance is a dict
response=delete_db_instance, # type: ignore[arg-type]
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_delay * self.waiter_max_attempts),
)
if self.wait_for_completion:
waiter = self.hook.conn.get_waiter("db_instance_deleted")
wait(
waiter=waiter,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
args={"DBInstanceIdentifier": self.db_instance_identifier},
failure_message="DB instance deletion failed",
status_message="DB Instance status is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
return json.dumps(delete_db_instance, default=str)
def execute_complete(self, context, event=None) -> str:
if event["status"] != "success":
raise AirflowException(f"DB instance deletion failed: {event}")
else:
return json.dumps(event["response"], default=str)
class RdsStartDbOperator(RdsBaseOperator):
"""
Starts an RDS DB instance / cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsStartDbOperator`
:param db_identifier: The AWS identifier of the DB to start
:param db_type: Type of the DB - either "instance" or "cluster" (default: "instance")
:param aws_conn_id: The Airflow connection used for AWS credentials. (default: "aws_default")
:param wait_for_completion: If True, waits for DB to start. (default: True)
"""
template_fields = ("db_identifier", "db_type")
def __init__(
self,
*,
db_identifier: str,
db_type: RdsDbType | str = RdsDbType.INSTANCE,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = True,
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_identifier = db_identifier
self.db_type = db_type
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.db_type = RdsDbType(self.db_type)
start_db_response = self._start_db()
if self.wait_for_completion:
self._wait_until_db_available()
return json.dumps(start_db_response, default=str)
def _start_db(self):
self.log.info("Starting DB %s '%s'", self.db_type.value, self.db_identifier)
if self.db_type == RdsDbType.INSTANCE:
response = self.hook.conn.start_db_instance(DBInstanceIdentifier=self.db_identifier)
else:
response = self.hook.conn.start_db_cluster(DBClusterIdentifier=self.db_identifier)
return response
def _wait_until_db_available(self):
self.log.info("Waiting for DB %s to reach 'available' state", self.db_type.value)
if self.db_type == RdsDbType.INSTANCE:
self.hook.wait_for_db_instance_state(self.db_identifier, target_state="available")
else:
self.hook.wait_for_db_cluster_state(self.db_identifier, target_state="available")
class RdsStopDbOperator(RdsBaseOperator):
"""
Stops an RDS DB instance / cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsStopDbOperator`
:param db_identifier: The AWS identifier of the DB to stop
:param db_type: Type of the DB - either "instance" or "cluster" (default: "instance")
:param db_snapshot_identifier: The instance identifier of the DB Snapshot to create before
stopping the DB instance. The default value (None) skips snapshot creation. This
parameter is ignored when ``db_type`` is "cluster"
:param aws_conn_id: The Airflow connection used for AWS credentials. (default: "aws_default")
:param wait_for_completion: If True, waits for DB to stop. (default: True)
"""
template_fields = ("db_identifier", "db_snapshot_identifier", "db_type")
def __init__(
self,
*,
db_identifier: str,
db_type: RdsDbType | str = RdsDbType.INSTANCE,
db_snapshot_identifier: str | None = None,
aws_conn_id: str = "aws_default",
wait_for_completion: bool = True,
**kwargs,
):
super().__init__(aws_conn_id=aws_conn_id, **kwargs)
self.db_identifier = db_identifier
self.db_type = db_type
self.db_snapshot_identifier = db_snapshot_identifier
self.wait_for_completion = wait_for_completion
def execute(self, context: Context) -> str:
self.db_type = RdsDbType(self.db_type)
stop_db_response = self._stop_db()
if self.wait_for_completion:
self._wait_until_db_stopped()
return json.dumps(stop_db_response, default=str)
def _stop_db(self):
self.log.info("Stopping DB %s '%s'", self.db_type.value, self.db_identifier)
if self.db_type == RdsDbType.INSTANCE:
conn_params = {"DBInstanceIdentifier": self.db_identifier}
# The db snapshot parameter is optional, but the AWS SDK raises an exception
# if passed a null value. Only set snapshot id if value is present.
if self.db_snapshot_identifier:
conn_params["DBSnapshotIdentifier"] = self.db_snapshot_identifier
response = self.hook.conn.stop_db_instance(**conn_params)
else:
if self.db_snapshot_identifier:
self.log.warning(
"'db_snapshot_identifier' does not apply to db clusters. "
"Remove it to silence this warning."
)
response = self.hook.conn.stop_db_cluster(DBClusterIdentifier=self.db_identifier)
return response
def _wait_until_db_stopped(self):
self.log.info("Waiting for DB %s to reach 'stopped' state", self.db_type.value)
if self.db_type == RdsDbType.INSTANCE:
self.hook.wait_for_db_instance_state(self.db_identifier, target_state="stopped")
else:
self.hook.wait_for_db_cluster_state(self.db_identifier, target_state="stopped")
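# Illustrative usage (editorial sketch, not part of the provider module): stopping an instance
# after taking a snapshot and starting it again later. The identifiers are placeholders.
def _example_rds_stop_start_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_rds_stop_start",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        stop_db = RdsStopDbOperator(
            task_id="stop_db",
            db_identifier="my-db-instance",
            db_type="instance",
            db_snapshot_identifier="my-db-instance-pre-stop",
        )
        start_db = RdsStartDbOperator(
            task_id="start_db",
            db_identifier="my-db-instance",
            db_type="instance",
        )
        stop_db >> start_db
    return dag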
__all__ = [
"RdsCreateDbSnapshotOperator",
"RdsCopyDbSnapshotOperator",
"RdsDeleteDbSnapshotOperator",
"RdsCreateEventSubscriptionOperator",
"RdsDeleteEventSubscriptionOperator",
"RdsStartExportTaskOperator",
"RdsCancelExportTaskOperator",
"RdsCreateDbInstanceOperator",
"RdsDeleteDbInstanceOperator",
"RdsStartDbOperator",
"RdsStopDbOperator",
]
| 35,538 | 40.810588 | 121 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/step_function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class StepFunctionStartExecutionOperator(BaseOperator):
"""
An Operator that begins execution of an AWS Step Function State Machine.
Additional arguments may be specified and are passed down to the underlying BaseOperator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StepFunctionStartExecutionOperator`
:param state_machine_arn: ARN of the Step Function State Machine
:param name: The name of the execution.
:param state_machine_input: JSON data input to pass to the State Machine
    :param aws_conn_id: aws connection to use
:param do_xcom_push: if True, execution_arn is pushed to XCom with key execution_arn.
"""
template_fields: Sequence[str] = ("state_machine_arn", "name", "input")
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
def __init__(
self,
*,
state_machine_arn: str,
name: str | None = None,
state_machine_input: dict | str | None = None,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.state_machine_arn = state_machine_arn
self.name = name
self.input = state_machine_input
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def execute(self, context: Context):
hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
execution_arn = hook.start_execution(self.state_machine_arn, self.name, self.input)
if execution_arn is None:
raise AirflowException(f"Failed to start State Machine execution for: {self.state_machine_arn}")
self.log.info("Started State Machine execution for %s: %s", self.state_machine_arn, execution_arn)
return execution_arn
class StepFunctionGetExecutionOutputOperator(BaseOperator):
"""
An Operator that returns the output of an AWS Step Function State Machine execution.
Additional arguments may be specified and are passed down to the underlying BaseOperator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StepFunctionGetExecutionOutputOperator`
:param execution_arn: ARN of the Step Function State Machine Execution
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
template_fields: Sequence[str] = ("execution_arn",)
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
def __init__(
self,
*,
execution_arn: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.execution_arn = execution_arn
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def execute(self, context: Context):
hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
execution_status = hook.describe_execution(self.execution_arn)
response = None
if "output" in execution_status:
response = json.loads(execution_status["output"])
elif "error" in execution_status:
response = json.loads(execution_status["error"])
self.log.info("Got State Machine Execution output for %s", self.execution_arn)
return response
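# Illustrative usage (editorial sketch, not part of the provider module): starting a state machine
# execution and then reading its output. The ARN is a placeholder; the second task pulls the
# execution ARN that the first task returned (and therefore pushed to XCom). In a real pipeline a
# StepFunctionExecutionSensor would usually sit between the two tasks so the execution has
# finished before its output is read.
def _example_step_function_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_step_function",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        start_execution = StepFunctionStartExecutionOperator(
            task_id="start_execution",
            state_machine_arn="arn:aws:states:us-east-1:123456789012:stateMachine:my-state-machine",
            state_machine_input={"key": "value"},
        )
        get_output = StepFunctionGetExecutionOutputOperator(
            task_id="get_output",
            execution_arn="{{ task_instance.xcom_pull(task_ids='start_execution') }}",
        )
        start_execution >> get_output
    return dag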
| 4,583 | 35.672 | 108 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/glacier.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GlacierCreateJobOperator(BaseOperator):
"""
Initiate an Amazon Glacier inventory-retrieval job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlacierCreateJobOperator`
:param aws_conn_id: The reference to the AWS connection details
:param vault_name: the Glacier vault on which job is executed
"""
template_fields: Sequence[str] = ("vault_name",)
def __init__(
self,
*,
aws_conn_id="aws_default",
vault_name: str,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.vault_name = vault_name
def execute(self, context: Context):
hook = GlacierHook(aws_conn_id=self.aws_conn_id)
return hook.retrieve_inventory(vault_name=self.vault_name)
class GlacierUploadArchiveOperator(BaseOperator):
"""
    This operator adds an archive to an Amazon S3 Glacier vault.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlacierUploadArchiveOperator`
:param vault_name: The name of the vault
:param body: A bytes or seekable file-like object. The data to upload.
:param checksum: The SHA256 tree hash of the data being uploaded.
This parameter is automatically populated if it is not provided
:param archive_description: The description of the archive you are uploading
:param account_id: (Optional) AWS account ID of the account that owns the vault.
Defaults to the credentials used to sign the request
:param aws_conn_id: The reference to the AWS connection details
"""
template_fields: Sequence[str] = ("vault_name",)
def __init__(
self,
*,
vault_name: str,
body: object,
checksum: str | None = None,
archive_description: str | None = None,
account_id: str | None = None,
aws_conn_id="aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.account_id = account_id
self.vault_name = vault_name
self.body = body
self.checksum = checksum
self.archive_description = archive_description
def execute(self, context: Context):
hook = GlacierHook(aws_conn_id=self.aws_conn_id)
return hook.get_conn().upload_archive(
accountId=self.account_id,
vaultName=self.vault_name,
archiveDescription=self.archive_description,
body=self.body,
checksum=self.checksum,
)
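# Illustrative usage (editorial sketch, not part of the provider module): uploading a small
# archive to a vault and then starting an inventory-retrieval job. The vault name and payload are
# placeholders; account_id="-" is the Glacier convention for "the account owning the credentials".
def _example_glacier_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_glacier",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        upload_archive = GlacierUploadArchiveOperator(
            task_id="upload_archive",
            vault_name="my-vault",
            body=b"example archive payload",
            archive_description="example archive",
            account_id="-",
        )
        create_job = GlacierCreateJobOperator(
            task_id="create_inventory_retrieval_job",
            vault_name="my-vault",
        )
        upload_archive >> create_job
    return dag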
| 3,707 | 33.654206 | 84 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/appflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime, timedelta
from functools import cached_property
from time import sleep
from typing import TYPE_CHECKING, cast
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.operators.python import ShortCircuitOperator
from airflow.providers.amazon.aws.hooks.appflow import AppflowHook
from airflow.providers.amazon.aws.utils import datetime_to_epoch_ms
if TYPE_CHECKING:
from mypy_boto3_appflow.type_defs import (
DescribeFlowExecutionRecordsResponseTypeDef,
ExecutionRecordTypeDef,
TaskTypeDef,
)
from airflow.utils.context import Context
SUPPORTED_SOURCES = {"salesforce", "zendesk"}
MANDATORY_FILTER_DATE_MSG = "The filter_date argument is mandatory for {entity}!"
NOT_SUPPORTED_SOURCE_MSG = "Source {source} is not supported for {entity}!"
class AppflowBaseOperator(BaseOperator):
"""
Amazon Appflow Base Operator class (not supposed to be used directly in DAGs).
:param source: The source name (Supported: salesforce, zendesk)
:param flow_name: The flow name
:param flow_update: A boolean to enable/disable a flow update before the run
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
ui_color = "#2bccbd"
UPDATE_PROPAGATION_TIME: int = 15
def __init__(
self,
source: str,
flow_name: str,
flow_update: bool,
source_field: str | None = None,
filter_date: str | None = None,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
if source not in SUPPORTED_SOURCES:
raise ValueError(f"{source} is not a supported source (options: {SUPPORTED_SOURCES})!")
self.filter_date = filter_date
self.flow_name = flow_name
self.source = source
self.source_field = source_field
self.poll_interval = poll_interval
self.aws_conn_id = aws_conn_id
self.region = region
self.flow_update = flow_update
self.wait_for_completion = wait_for_completion
@cached_property
def hook(self) -> AppflowHook:
"""Create and return an AppflowHook."""
return AppflowHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
def execute(self, context: Context) -> None:
self.filter_date_parsed: datetime | None = (
datetime.fromisoformat(self.filter_date) if self.filter_date else None
)
self.connector_type = self._get_connector_type()
if self.flow_update:
self._update_flow()
            # while scheduled flows will pick up the update right away, on-demand flows might use out of date
            # info if triggered right after an update, so we need to wait a bit for the DB to be consistent.
sleep(AppflowBaseOperator.UPDATE_PROPAGATION_TIME)
self._run_flow(context)
def _get_connector_type(self) -> str:
response = self.hook.conn.describe_flow(flowName=self.flow_name)
connector_type = response["sourceFlowConfig"]["connectorType"]
if self.source != connector_type.lower():
raise ValueError(f"Incompatible source ({self.source} and connector type ({connector_type})!")
return connector_type
def _update_flow(self) -> None:
self.hook.update_flow_filter(flow_name=self.flow_name, filter_tasks=[], set_trigger_ondemand=True)
def _run_flow(self, context) -> str:
execution_id = self.hook.run_flow(
flow_name=self.flow_name,
poll_interval=self.poll_interval,
wait_for_completion=self.wait_for_completion,
)
task_instance = context["task_instance"]
task_instance.xcom_push("execution_id", execution_id)
return execution_id
class AppflowRunOperator(AppflowBaseOperator):
"""
    Execute an Appflow run with filters as-is.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunOperator`
:param source: The source name (Supported: salesforce, zendesk)
:param flow_name: The flow name
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
def __init__(
self,
source: str,
flow_name: str,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if source not in {"salesforce", "zendesk"}:
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=False,
source_field=None,
filter_date=None,
poll_interval=poll_interval,
aws_conn_id=aws_conn_id,
region=region,
wait_for_completion=wait_for_completion,
**kwargs,
)
class AppflowRunFullOperator(AppflowBaseOperator):
"""
    Execute an Appflow full run, removing any filter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunFullOperator`
:param source: The source name (Supported: salesforce, zendesk)
:param flow_name: The flow name
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
def __init__(
self,
source: str,
flow_name: str,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if source not in {"salesforce", "zendesk"}:
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunFullOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=None,
filter_date=None,
poll_interval=poll_interval,
aws_conn_id=aws_conn_id,
region=region,
wait_for_completion=wait_for_completion,
**kwargs,
)
class AppflowRunBeforeOperator(AppflowBaseOperator):
"""
    Execute an Appflow run after updating the filters to select only previous data.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunBeforeOperator`
:param source: The source name (Supported: salesforce)
:param flow_name: The flow name
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
template_fields = ("filter_date",)
def __init__(
self,
source: str,
flow_name: str,
source_field: str,
filter_date: str,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if not filter_date:
raise ValueError(MANDATORY_FILTER_DATE_MSG.format(entity="AppflowRunBeforeOperator"))
if source != "salesforce":
raise ValueError(
NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunBeforeOperator")
)
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=source_field,
filter_date=filter_date,
poll_interval=poll_interval,
aws_conn_id=aws_conn_id,
region=region,
wait_for_completion=wait_for_completion,
**kwargs,
)
def _update_flow(self) -> None:
if not self.filter_date_parsed:
raise ValueError(f"Invalid filter_date argument parser value: {self.filter_date_parsed}")
if not self.source_field:
raise ValueError(f"Invalid source_field argument value: {self.source_field}")
filter_task: TaskTypeDef = {
"taskType": "Filter",
"connectorOperator": {self.connector_type: "LESS_THAN"}, # type: ignore
"sourceFields": [self.source_field],
"taskProperties": {
"DATA_TYPE": "datetime",
"VALUE": str(datetime_to_epoch_ms(self.filter_date_parsed)),
}, # NOT inclusive
}
self.hook.update_flow_filter(
flow_name=self.flow_name, filter_tasks=[filter_task], set_trigger_ondemand=True
)
class AppflowRunAfterOperator(AppflowBaseOperator):
"""
    Execute an Appflow run after updating the filters to select only future data.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunAfterOperator`
:param source: The source name (Supported: salesforce, zendesk)
:param flow_name: The flow name
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
template_fields = ("filter_date",)
def __init__(
self,
source: str,
flow_name: str,
source_field: str,
filter_date: str,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if not filter_date:
raise ValueError(MANDATORY_FILTER_DATE_MSG.format(entity="AppflowRunAfterOperator"))
if source not in {"salesforce", "zendesk"}:
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunAfterOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=source_field,
filter_date=filter_date,
poll_interval=poll_interval,
aws_conn_id=aws_conn_id,
region=region,
wait_for_completion=wait_for_completion,
**kwargs,
)
def _update_flow(self) -> None:
if not self.filter_date_parsed:
raise ValueError(f"Invalid filter_date argument parser value: {self.filter_date_parsed}")
if not self.source_field:
raise ValueError(f"Invalid source_field argument value: {self.source_field}")
filter_task: TaskTypeDef = {
"taskType": "Filter",
"connectorOperator": {self.connector_type: "GREATER_THAN"}, # type: ignore
"sourceFields": [self.source_field],
"taskProperties": {
"DATA_TYPE": "datetime",
"VALUE": str(datetime_to_epoch_ms(self.filter_date_parsed)),
}, # NOT inclusive
}
self.hook.update_flow_filter(
flow_name=self.flow_name, filter_tasks=[filter_task], set_trigger_ondemand=True
)
class AppflowRunDailyOperator(AppflowBaseOperator):
"""
    Execute an Appflow run after updating the filters to select only a single day.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunDailyOperator`
:param source: The source name (Supported: salesforce)
:param flow_name: The flow name
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: aws connection to use
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
template_fields = ("filter_date",)
def __init__(
self,
source: str,
flow_name: str,
source_field: str,
filter_date: str,
poll_interval: int = 20,
aws_conn_id: str = "aws_default",
region: str | None = None,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if not filter_date:
raise ValueError(MANDATORY_FILTER_DATE_MSG.format(entity="AppflowRunDailyOperator"))
if source != "salesforce":
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunDailyOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=source_field,
filter_date=filter_date,
poll_interval=poll_interval,
aws_conn_id=aws_conn_id,
region=region,
wait_for_completion=wait_for_completion,
**kwargs,
)
def _update_flow(self) -> None:
if not self.filter_date_parsed:
raise ValueError(f"Invalid filter_date argument parser value: {self.filter_date_parsed}")
if not self.source_field:
raise ValueError(f"Invalid source_field argument value: {self.source_field}")
start_filter_date = self.filter_date_parsed - timedelta(milliseconds=1)
end_filter_date = self.filter_date_parsed + timedelta(days=1)
filter_task: TaskTypeDef = {
"taskType": "Filter",
"connectorOperator": {self.connector_type: "BETWEEN"}, # type: ignore
"sourceFields": [self.source_field],
"taskProperties": {
"DATA_TYPE": "datetime",
"LOWER_BOUND": str(datetime_to_epoch_ms(start_filter_date)), # NOT inclusive
"UPPER_BOUND": str(datetime_to_epoch_ms(end_filter_date)), # NOT inclusive
},
}
self.hook.update_flow_filter(
flow_name=self.flow_name, filter_tasks=[filter_task], set_trigger_ondemand=True
)
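# A minimal usage sketch (assumed flow and field names; not part of the upstream module).
# The BETWEEN filter built by _update_flow restricts the run to records whose field value
# falls on the single (templated) day given in filter_date.
def _example_appflow_run_daily():
    return AppflowRunDailyOperator(
        task_id="campaign_run_daily",
        source="salesforce",
        flow_name="salesforce-campaign",  # placeholder flow name
        source_field="LastModifiedDate",  # placeholder datetime field to filter on
        filter_date="{{ ds }}",  # templated; selects exactly one day of data
    )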
class AppflowRecordsShortCircuitOperator(ShortCircuitOperator):
"""
    Short-circuit in case of an empty Appflow run.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRecordsShortCircuitOperator`
:param flow_name: The flow name
:param appflow_run_task_id: Run task ID from where this operator should extract the execution ID
:param ignore_downstream_trigger_rules: Ignore downstream trigger rules
:param aws_conn_id: aws connection to use
:param region: aws region to use
"""
ui_color = "#33ffec" # Light blue
def __init__(
self,
*,
flow_name: str,
appflow_run_task_id: str,
ignore_downstream_trigger_rules: bool = True,
aws_conn_id: str = "aws_default",
region: str | None = None,
**kwargs,
) -> None:
super().__init__(
python_callable=self._has_new_records_func,
op_kwargs={
"flow_name": flow_name,
"appflow_run_task_id": appflow_run_task_id,
},
ignore_downstream_trigger_rules=ignore_downstream_trigger_rules,
**kwargs,
)
self.aws_conn_id = aws_conn_id
self.region = region
@staticmethod
def _get_target_execution_id(
records: list[ExecutionRecordTypeDef], execution_id: str
) -> ExecutionRecordTypeDef | None:
for record in records:
if record.get("executionId") == execution_id:
return record
return None
@cached_property
def hook(self) -> AppflowHook:
"""Create and return an AppflowHook."""
return AppflowHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
def _has_new_records_func(self, **kwargs) -> bool:
appflow_task_id = kwargs["appflow_run_task_id"]
self.log.info("appflow_task_id: %s", appflow_task_id)
flow_name = kwargs["flow_name"]
self.log.info("flow_name: %s", flow_name)
af_client = self.hook.conn
task_instance = kwargs["task_instance"]
execution_id = task_instance.xcom_pull(task_ids=appflow_task_id, key="execution_id") # type: ignore
if not execution_id:
raise AirflowException(f"No execution_id found from task_id {appflow_task_id}!")
self.log.info("execution_id: %s", execution_id)
args = {"flowName": flow_name, "maxResults": 100}
response: DescribeFlowExecutionRecordsResponseTypeDef = cast(
"DescribeFlowExecutionRecordsResponseTypeDef", {}
)
record = None
while not record:
if "nextToken" in response:
response = af_client.describe_flow_execution_records(nextToken=response["nextToken"], **args)
else:
response = af_client.describe_flow_execution_records(**args)
record = AppflowRecordsShortCircuitOperator._get_target_execution_id(
response["flowExecutions"], execution_id
)
if not record and "nextToken" not in response:
raise AirflowException(f"Flow ({execution_id}) without recordsProcessed info.")
execution = record.get("executionResult", {})
if "recordsProcessed" not in execution:
raise AirflowException(f"Flow ({execution_id}) without recordsProcessed info!")
records_processed = execution["recordsProcessed"]
self.log.info("records_processed: %d", records_processed)
task_instance.xcom_push("records_processed", records_processed) # type: ignore
return records_processed > 0
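# A minimal usage sketch (task ids and flow name are placeholders; not part of the upstream
# module). The short-circuit task reads the execution_id pushed to XCom by the upstream
# Appflow run task and skips downstream tasks when no records were processed.
def _example_appflow_short_circuit():
    return AppflowRecordsShortCircuitOperator(
        task_id="skip_if_no_new_records",
        flow_name="salesforce-campaign",  # placeholder flow name
        appflow_run_task_id="campaign_run_daily",  # task_id of the upstream Appflow run task
    )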
| 19,662 | 37.630648 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/quicksight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.quicksight import QuickSightHook
if TYPE_CHECKING:
from airflow.utils.context import Context
DEFAULT_CONN_ID = "aws_default"
class QuickSightCreateIngestionOperator(BaseOperator):
"""
    Creates and starts a new SPICE ingestion for a dataset; also used to refresh existing SPICE datasets.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:QuickSightCreateIngestionOperator`
:param data_set_id: ID of the dataset used in the ingestion.
:param ingestion_id: ID for the ingestion.
    :param ingestion_type: Type of ingestion. Values can be INCREMENTAL_REFRESH or FULL_REFRESH.
        Defaults to FULL_REFRESH.
    :param wait_for_completion: If True, wait for the Amazon QuickSight Ingestion to reach a
        terminal state before returning.
    :param check_interval: If wait_for_completion is True, the time interval, in seconds,
        at which the operator checks the status of the Amazon QuickSight Ingestion.
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
"""
template_fields: Sequence[str] = (
"data_set_id",
"ingestion_id",
"ingestion_type",
"wait_for_completion",
"check_interval",
"aws_conn_id",
"region",
)
ui_color = "#ffd700"
def __init__(
self,
data_set_id: str,
ingestion_id: str,
ingestion_type: str = "FULL_REFRESH",
wait_for_completion: bool = True,
check_interval: int = 30,
aws_conn_id: str = DEFAULT_CONN_ID,
region: str | None = None,
**kwargs,
):
self.data_set_id = data_set_id
self.ingestion_id = ingestion_id
self.ingestion_type = ingestion_type
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.aws_conn_id = aws_conn_id
self.region = region
super().__init__(**kwargs)
def execute(self, context: Context):
hook = QuickSightHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region,
)
self.log.info("Running the Amazon QuickSight SPICE Ingestion on Dataset ID: %s", self.data_set_id)
return hook.create_ingestion(
data_set_id=self.data_set_id,
ingestion_id=self.ingestion_id,
ingestion_type=self.ingestion_type,
wait_for_completion=self.wait_for_completion,
check_interval=self.check_interval,
)
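# A minimal usage sketch (dataset and ingestion ids are placeholders; not part of the
# upstream module). With wait_for_completion=True the task blocks, polling every
# check_interval seconds, until the SPICE ingestion reaches a terminal state.
def _example_quicksight_ingestion():
    return QuickSightCreateIngestionOperator(
        task_id="refresh_sales_dataset",
        data_set_id="sales-dataset-id",  # placeholder dataset id
        ingestion_id="sales-refresh-{{ ds_nodash }}",  # placeholder; templated, unique per run
        ingestion_type="FULL_REFRESH",
    )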
| 3,968 | 39.090909 | 107 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/sns.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Publish message to SNS queue."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.sns import SnsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SnsPublishOperator(BaseOperator):
"""
Publish a message to Amazon SNS.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SnsPublishOperator`
:param aws_conn_id: aws connection to use
:param target_arn: either a TopicArn or an EndpointArn
:param message: the default message you want to send (templated)
:param subject: the message subject you want to send (templated)
:param message_attributes: the message attributes you want to send as a flat dict (data type will be
determined automatically)
"""
template_fields: Sequence[str] = ("target_arn", "message", "subject", "message_attributes", "aws_conn_id")
template_ext: Sequence[str] = ()
template_fields_renderers = {"message_attributes": "json"}
def __init__(
self,
*,
target_arn: str,
message: str,
subject: str | None = None,
message_attributes: dict | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.target_arn = target_arn
self.message = message
self.subject = subject
self.message_attributes = message_attributes
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
sns = SnsHook(aws_conn_id=self.aws_conn_id)
self.log.info(
"Sending SNS notification to %s using %s:\nsubject=%s\nattributes=%s\nmessage=%s",
self.target_arn,
self.aws_conn_id,
self.subject,
self.message_attributes,
self.message,
)
return sns.publish_to_target(
target_arn=self.target_arn,
message=self.message,
subject=self.subject,
message_attributes=self.message_attributes,
)
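# A minimal usage sketch (the topic ARN is a placeholder; not part of the upstream module).
# message_attributes are passed as a flat dict; the hook infers each attribute's data type.
def _example_sns_publish():
    return SnsPublishOperator(
        task_id="notify_on_success",
        target_arn="arn:aws:sns:us-east-1:123456789012:example-topic",  # placeholder ARN
        subject="DAG finished",
        message="Run {{ run_id }} completed.",  # templated
        message_attributes={"environment": "staging"},
    )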
| 2,960 | 33.835294 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/s3.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS S3 operators."""
from __future__ import annotations
import subprocess
import sys
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.helpers import exactly_one
if TYPE_CHECKING:
from airflow.utils.context import Context
BUCKET_DOES_NOT_EXIST_MSG = "Bucket with name: %s doesn't exist"
class S3CreateBucketOperator(BaseOperator):
"""
This operator creates an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3CreateBucketOperator`
    :param bucket_name: This is the bucket name you want to create
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
    :param region_name: AWS region name. If not specified, it is fetched from the connection.
"""
template_fields: Sequence[str] = ("bucket_name",)
def __init__(
self,
*,
bucket_name: str,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.region_name = region_name
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
if not s3_hook.check_for_bucket(self.bucket_name):
s3_hook.create_bucket(bucket_name=self.bucket_name, region_name=self.region_name)
self.log.info("Created bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s already exists", self.bucket_name)
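# A minimal usage sketch (bucket name and region are placeholders; not part of the upstream
# module). The operator is idempotent: if the bucket already exists it only logs a message.
def _example_create_bucket():
    return S3CreateBucketOperator(
        task_id="create_staging_bucket",
        bucket_name="my-example-staging-bucket",  # placeholder bucket name
        region_name="eu-west-1",  # optional; falls back to the connection's region
    )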
class S3DeleteBucketOperator(BaseOperator):
"""
This operator deletes an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3DeleteBucketOperator`
    :param bucket_name: This is the bucket name you want to delete
:param force_delete: Forcibly delete all objects in the bucket before deleting the bucket
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("bucket_name",)
def __init__(
self,
bucket_name: str,
force_delete: bool = False,
aws_conn_id: str | None = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.force_delete = force_delete
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
s3_hook.delete_bucket(bucket_name=self.bucket_name, force_delete=self.force_delete)
self.log.info("Deleted bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s doesn't exist", self.bucket_name)
class S3GetBucketTaggingOperator(BaseOperator):
"""
This operator gets tagging from an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3GetBucketTaggingOperator`
    :param bucket_name: This is the bucket name you want to reference
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("bucket_name",)
def __init__(self, bucket_name: str, aws_conn_id: str | None = "aws_default", **kwargs) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
self.log.info("Getting tags for bucket %s", self.bucket_name)
return s3_hook.get_bucket_tagging(self.bucket_name)
else:
self.log.warning(BUCKET_DOES_NOT_EXIST_MSG, self.bucket_name)
return None
class S3PutBucketTaggingOperator(BaseOperator):
"""
This operator puts tagging for an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3PutBucketTaggingOperator`
:param bucket_name: The name of the bucket to add tags to.
:param key: The key portion of the key/value pair for a tag to be added.
If a key is provided, a value must be provided as well.
:param value: The value portion of the key/value pair for a tag to be added.
If a value is provided, a key must be provided as well.
:param tag_set: A dictionary containing the tags, or a List of key/value pairs.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("bucket_name",)
template_fields_renderers = {"tag_set": "json"}
def __init__(
self,
bucket_name: str,
key: str | None = None,
value: str | None = None,
tag_set: dict | list[dict[str, str]] | None = None,
aws_conn_id: str | None = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.key = key
self.value = value
self.tag_set = tag_set
self.bucket_name = bucket_name
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
self.log.info("Putting tags for bucket %s", self.bucket_name)
return s3_hook.put_bucket_tagging(
key=self.key, value=self.value, tag_set=self.tag_set, bucket_name=self.bucket_name
)
else:
self.log.warning(BUCKET_DOES_NOT_EXIST_MSG, self.bucket_name)
return None
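# A minimal usage sketch (bucket name and tags are placeholders; not part of the upstream
# module). Tags may be given either as a single key/value pair or as a tag_set; per the
# docstring above, a plain dict is accepted in place of a list of {"Key": ..., "Value": ...}.
def _example_put_bucket_tagging():
    return S3PutBucketTaggingOperator(
        task_id="tag_staging_bucket",
        bucket_name="my-example-staging-bucket",  # placeholder bucket name
        tag_set={"team": "data-eng", "env": "staging"},  # placeholder tags
    )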
class S3DeleteBucketTaggingOperator(BaseOperator):
"""
This operator deletes tagging from an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3DeleteBucketTaggingOperator`
:param bucket_name: This is the name of the bucket to delete tags from.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("bucket_name",)
def __init__(self, bucket_name: str, aws_conn_id: str | None = "aws_default", **kwargs) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
self.log.info("Deleting tags for bucket %s", self.bucket_name)
return s3_hook.delete_bucket_tagging(self.bucket_name)
else:
self.log.warning(BUCKET_DOES_NOT_EXIST_MSG, self.bucket_name)
return None
class S3CopyObjectOperator(BaseOperator):
"""
Creates a copy of an object that is already stored in S3.
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3CopyObjectOperator`
:param source_bucket_key: The key of the source object. (templated)
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:param dest_bucket_key: The key of the object to copy to. (templated)
The convention to specify `dest_bucket_key` is the same as `source_bucket_key`.
:param source_bucket_name: Name of the S3 bucket where the source object is in. (templated)
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:param dest_bucket_name: Name of the S3 bucket to where the object is copied. (templated)
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:param source_version_id: Version ID of the source object (OPTIONAL)
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
        - path/to/cert/bundle.pem: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
"""
template_fields: Sequence[str] = (
"source_bucket_key",
"dest_bucket_key",
"source_bucket_name",
"dest_bucket_name",
)
def __init__(
self,
*,
source_bucket_key: str,
dest_bucket_key: str,
source_bucket_name: str | None = None,
dest_bucket_name: str | None = None,
source_version_id: str | None = None,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
acl_policy: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.source_bucket_key = source_bucket_key
self.dest_bucket_key = dest_bucket_key
self.source_bucket_name = source_bucket_name
self.dest_bucket_name = dest_bucket_name
self.source_version_id = source_version_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.acl_policy = acl_policy
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
s3_hook.copy_object(
self.source_bucket_key,
self.dest_bucket_key,
self.source_bucket_name,
self.dest_bucket_name,
self.source_version_id,
self.acl_policy,
)
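# A minimal usage sketch (bucket and key names are placeholders; not part of the upstream
# module). Source and destination can also be given as full s3:// URLs, in which case the
# corresponding *_bucket_name arguments must be omitted.
def _example_copy_object():
    return S3CopyObjectOperator(
        task_id="copy_raw_to_curated",
        source_bucket_name="my-example-raw",  # placeholder bucket
        source_bucket_key="exports/{{ ds }}/data.csv",  # templated key
        dest_bucket_name="my-example-curated",  # placeholder bucket
        dest_bucket_key="imports/{{ ds }}/data.csv",
    )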
class S3CreateObjectOperator(BaseOperator):
"""
Creates a new object from `data` as string or bytes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3CreateObjectOperator`
:param s3_bucket: Name of the S3 bucket where to save the object. (templated)
        It should be omitted when `s3_key` is provided as a full s3:// url.
:param s3_key: The key of the object to be created. (templated)
It can be either full s3:// style url or relative path from root level.
        When it's specified as a full s3:// url, please omit `s3_bucket`.
:param data: string or bytes to save as content.
:param replace: If True, it will overwrite the key if it already exists
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:param encoding: The string to byte encoding.
It should be specified only when `data` is provided as string.
:param compression: Type of compression to use, currently only gzip is supported.
It can be specified only when `data` is provided as string.
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
        - path/to/cert/bundle.pem: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
"""
template_fields: Sequence[str] = ("s3_bucket", "s3_key", "data")
def __init__(
self,
*,
s3_bucket: str | None = None,
s3_key: str,
data: str | bytes,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
encoding: str | None = None,
compression: str | None = None,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.data = data
self.replace = replace
self.encrypt = encrypt
self.acl_policy = acl_policy
self.encoding = encoding
self.compression = compression
self.aws_conn_id = aws_conn_id
self.verify = verify
def execute(self, context: Context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
s3_bucket, s3_key = s3_hook.get_s3_bucket_key(self.s3_bucket, self.s3_key, "dest_bucket", "dest_key")
if isinstance(self.data, str):
s3_hook.load_string(
self.data,
s3_key,
s3_bucket,
self.replace,
self.encrypt,
self.encoding,
self.acl_policy,
self.compression,
)
else:
s3_hook.load_bytes(self.data, s3_key, s3_bucket, self.replace, self.encrypt, self.acl_policy)
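# A minimal usage sketch (bucket and key are placeholders; not part of the upstream module).
# String data goes through load_string (optionally encoded/compressed); bytes go through
# load_bytes.
def _example_create_object():
    return S3CreateObjectOperator(
        task_id="write_marker_file",
        s3_bucket="my-example-staging-bucket",  # placeholder bucket name
        s3_key="markers/{{ ds }}/_SUCCESS",  # templated key
        data="run_id={{ run_id }}",
        replace=True,
    )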
class S3DeleteObjectsOperator(BaseOperator):
"""
    Delete one or multiple objects from a bucket using a single HTTP request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3DeleteObjectsOperator`
:param bucket: Name of the bucket in which you are going to delete object(s). (templated)
:param keys: The key(s) to delete from S3 bucket. (templated)
When ``keys`` is a string, it's supposed to be the key name of
the single object to delete.
When ``keys`` is a list, it's supposed to be the list of the
keys to delete.
:param prefix: Prefix of objects to delete. (templated)
All objects matching this prefix in the bucket will be deleted.
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
"""
template_fields: Sequence[str] = ("keys", "bucket", "prefix")
def __init__(
self,
*,
bucket: str,
keys: str | list | None = None,
prefix: str | None = None,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.bucket = bucket
self.keys = keys
self.prefix = prefix
self.aws_conn_id = aws_conn_id
self.verify = verify
if not exactly_one(prefix is None, keys is None):
raise AirflowException("Either keys or prefix should be set.")
def execute(self, context: Context):
if not exactly_one(self.keys is None, self.prefix is None):
raise AirflowException("Either keys or prefix should be set.")
if isinstance(self.keys, (list, str)) and not bool(self.keys):
return
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
keys = self.keys or s3_hook.list_keys(bucket_name=self.bucket, prefix=self.prefix)
if keys:
s3_hook.delete_objects(bucket=self.bucket, keys=keys)
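# A minimal usage sketch (bucket and prefix are placeholders; not part of the upstream
# module). Exactly one of keys or prefix must be provided; here every object under the
# prefix is listed and deleted in a single batch.
def _example_delete_objects():
    return S3DeleteObjectsOperator(
        task_id="cleanup_tmp_exports",
        bucket="my-example-staging-bucket",  # placeholder bucket name
        prefix="tmp/{{ ds }}/",  # templated prefix; mutually exclusive with keys
    )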
class S3FileTransformOperator(BaseOperator):
"""
Copies data from a source S3 location to a temporary location on the local filesystem.
Runs a transformation on this file as specified by the transformation
script and uploads the output to a destination S3 location.
    The locations of the source and the destination files in the local
    filesystem are provided as the first and second arguments to the
    transformation script. The transformation script is expected to read the
    data from the source, transform it, and write the output to the local
    destination file. The operator then takes over control and uploads the
    local destination file to S3.
    S3 Select is also available to filter the source contents. Users can
    omit the transformation script if an S3 Select expression is specified.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3FileTransformOperator`
:param source_s3_key: The key to be retrieved from S3. (templated)
    :param dest_s3_key: The destination S3 key to write the output to. (templated)
:param transform_script: location of the executable transformation script
:param select_expression: S3 Select expression
:param script_args: arguments for transformation script (templated)
:param source_aws_conn_id: source s3 connection
:param source_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
This is also applicable to ``dest_verify``.
:param dest_aws_conn_id: destination s3 connection
:param dest_verify: Whether or not to verify SSL certificates for S3 connection.
See: ``source_verify``
:param replace: Replace dest S3 key if it already exists
"""
template_fields: Sequence[str] = ("source_s3_key", "dest_s3_key", "script_args")
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
def __init__(
self,
*,
source_s3_key: str,
dest_s3_key: str,
transform_script: str | None = None,
select_expression=None,
script_args: Sequence[str] | None = None,
source_aws_conn_id: str = "aws_default",
source_verify: bool | str | None = None,
dest_aws_conn_id: str = "aws_default",
dest_verify: bool | str | None = None,
replace: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_s3_key = source_s3_key
self.source_aws_conn_id = source_aws_conn_id
self.source_verify = source_verify
self.dest_s3_key = dest_s3_key
self.dest_aws_conn_id = dest_aws_conn_id
self.dest_verify = dest_verify
self.replace = replace
self.transform_script = transform_script
self.select_expression = select_expression
self.script_args = script_args or []
self.output_encoding = sys.getdefaultencoding()
def execute(self, context: Context):
if self.transform_script is None and self.select_expression is None:
raise AirflowException("Either transform_script or select_expression must be specified")
source_s3 = S3Hook(aws_conn_id=self.source_aws_conn_id, verify=self.source_verify)
dest_s3 = S3Hook(aws_conn_id=self.dest_aws_conn_id, verify=self.dest_verify)
self.log.info("Downloading source S3 file %s", self.source_s3_key)
if not source_s3.check_for_key(self.source_s3_key):
raise AirflowException(f"The source key {self.source_s3_key} does not exist")
source_s3_key_object = source_s3.get_key(self.source_s3_key)
with NamedTemporaryFile("wb") as f_source, NamedTemporaryFile("wb") as f_dest:
self.log.info("Dumping S3 file %s contents to local file %s", self.source_s3_key, f_source.name)
if self.select_expression is not None:
content = source_s3.select_key(key=self.source_s3_key, expression=self.select_expression)
f_source.write(content.encode("utf-8"))
else:
source_s3_key_object.download_fileobj(Fileobj=f_source)
f_source.flush()
if self.transform_script is not None:
with subprocess.Popen(
[self.transform_script, f_source.name, f_dest.name, *self.script_args],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
) as process:
self.log.info("Output:")
if process.stdout is not None:
for line in iter(process.stdout.readline, b""):
self.log.info(line.decode(self.output_encoding).rstrip())
process.wait()
if process.returncode:
raise AirflowException(f"Transform script failed: {process.returncode}")
else:
self.log.info(
"Transform script successful. Output temporarily located at %s", f_dest.name
)
self.log.info("Uploading transformed file to S3")
f_dest.flush()
dest_s3.load_file(
filename=f_dest.name if self.transform_script else f_source.name,
key=self.dest_s3_key,
replace=self.replace,
)
self.log.info("Upload successful")
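# A minimal usage sketch (keys and script path are placeholders; not part of the upstream
# module). The script receives the local source and destination paths as its first two
# arguments; alternatively, pass select_expression instead of transform_script to filter
# the source with S3 Select.
def _example_file_transform():
    return S3FileTransformOperator(
        task_id="normalize_export",
        source_s3_key="s3://my-example-raw/exports/{{ ds }}/data.csv",  # placeholder
        dest_s3_key="s3://my-example-curated/exports/{{ ds }}/data.csv",  # placeholder
        transform_script="/usr/local/bin/normalize.py",  # placeholder executable script
        replace=True,
    )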
class S3ListOperator(BaseOperator):
"""
List all objects from the bucket with the given string prefix in name.
    This operator returns a Python list with the names of objects, which can be
    used by `xcom` in the downstream task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ListOperator`
:param bucket: The S3 bucket where to find the objects. (templated)
    :param prefix: Prefix string to filter the objects whose names begin with
        this prefix. (templated)
:param delimiter: the delimiter marks key hierarchy. (templated)
:param aws_conn_id: The connection ID to use when connecting to S3 storage.
    :param verify: Whether or not to verify SSL certificates for S3 connection.
        By default SSL certificates are verified.
        You can provide the following values:
        - ``False``: do not validate SSL certificates. SSL will still be used
                 (unless use_ssl is False), but SSL certificates will not be
                 verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
            You can specify this argument if you want to use a different
            CA cert bundle than the one used by botocore.
    :param apply_wildcard: whether to treat '*' as a wildcard or a plain symbol in the prefix.
**Example**:
The following operator would list all the files
(excluding subfolders) from the S3
``customers/2018/04/`` key in the ``data`` bucket. ::
s3_file = S3ListOperator(
task_id='list_3s_files',
bucket='data',
prefix='customers/2018/04/',
delimiter='/',
aws_conn_id='aws_customers_conn'
)
"""
template_fields: Sequence[str] = ("bucket", "prefix", "delimiter")
ui_color = "#ffd700"
def __init__(
self,
*,
bucket: str,
prefix: str = "",
delimiter: str = "",
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
apply_wildcard: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.aws_conn_id = aws_conn_id
self.verify = verify
self.apply_wildcard = apply_wildcard
def execute(self, context: Context):
hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
self.log.info(
"Getting the list of files from bucket: %s in prefix: %s (Delimiter %s)",
self.bucket,
self.prefix,
self.delimiter,
)
return hook.list_keys(
bucket_name=self.bucket,
prefix=self.prefix,
delimiter=self.delimiter,
apply_wildcard=self.apply_wildcard,
)
class S3ListPrefixesOperator(BaseOperator):
"""
List all subfolders from the bucket with the given string prefix in name.
    This operator returns a Python list with the names of all subfolders, which
    can be used by `xcom` in the downstream task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ListPrefixesOperator`
:param bucket: The S3 bucket where to find the subfolders. (templated)
    :param prefix: Prefix string to filter the subfolders whose names begin with
        this prefix. (templated)
:param delimiter: the delimiter marks subfolder hierarchy. (templated)
:param aws_conn_id: The connection ID to use when connecting to S3 storage.
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
**Example**:
The following operator would list all the subfolders
from the S3 ``customers/2018/04/`` prefix in the ``data`` bucket. ::
s3_file = S3ListPrefixesOperator(
task_id='list_s3_prefixes',
bucket='data',
prefix='customers/2018/04/',
delimiter='/',
aws_conn_id='aws_customers_conn'
)
"""
template_fields: Sequence[str] = ("bucket", "prefix", "delimiter")
ui_color = "#ffd700"
def __init__(
self,
*,
bucket: str,
prefix: str,
delimiter: str,
aws_conn_id: str = "aws_default",
verify: str | bool | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.aws_conn_id = aws_conn_id
self.verify = verify
def execute(self, context: Context):
hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
self.log.info(
"Getting the list of subfolders from bucket: %s in prefix: %s (Delimiter %s)",
self.bucket,
self.prefix,
self.delimiter,
)
return hook.list_prefixes(bucket_name=self.bucket, prefix=self.prefix, delimiter=self.delimiter)
| 30,235 | 38.369792 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/operators/dms.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.dms import DmsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DmsCreateTaskOperator(BaseOperator):
"""
    Creates an AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsCreateTaskOperator`
:param replication_task_id: Replication task id
:param source_endpoint_arn: Source endpoint ARN
:param target_endpoint_arn: Target endpoint ARN
:param replication_instance_arn: Replication instance ARN
:param table_mappings: Table mappings
:param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default.
:param create_task_kwargs: Extra arguments for DMS replication task creation.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = (
"replication_task_id",
"source_endpoint_arn",
"target_endpoint_arn",
"replication_instance_arn",
"table_mappings",
"migration_type",
"create_task_kwargs",
)
template_ext: Sequence[str] = ()
template_fields_renderers = {
"table_mappings": "json",
"create_task_kwargs": "json",
}
def __init__(
self,
*,
replication_task_id: str,
source_endpoint_arn: str,
target_endpoint_arn: str,
replication_instance_arn: str,
table_mappings: dict,
migration_type: str = "full-load",
create_task_kwargs: dict | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_id = replication_task_id
self.source_endpoint_arn = source_endpoint_arn
self.target_endpoint_arn = target_endpoint_arn
self.replication_instance_arn = replication_instance_arn
self.migration_type = migration_type
self.table_mappings = table_mappings
self.create_task_kwargs = create_task_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
"""
        Creates an AWS DMS replication task from Airflow.
        :return: replication task ARN
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
task_arn = dms_hook.create_replication_task(
replication_task_id=self.replication_task_id,
source_endpoint_arn=self.source_endpoint_arn,
target_endpoint_arn=self.target_endpoint_arn,
replication_instance_arn=self.replication_instance_arn,
migration_type=self.migration_type,
table_mappings=self.table_mappings,
**self.create_task_kwargs,
)
self.log.info("DMS replication task(%s) is ready.", self.replication_task_id)
return task_arn
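# A minimal usage sketch (all ARNs are placeholders; not part of the upstream module). The
# table_mappings dict follows the AWS DMS selection-rule JSON structure; the rule below is
# assumed to include every table in every schema.
def _example_dms_create_task():
    return DmsCreateTaskOperator(
        task_id="create_replication_task",
        replication_task_id="example-replication-task",
        source_endpoint_arn="arn:aws:dms:us-east-1:123456789012:endpoint:SOURCE",  # placeholder
        target_endpoint_arn="arn:aws:dms:us-east-1:123456789012:endpoint:TARGET",  # placeholder
        replication_instance_arn="arn:aws:dms:us-east-1:123456789012:rep:INSTANCE",  # placeholder
        table_mappings={
            "rules": [
                {
                    "rule-type": "selection",
                    "rule-id": "1",
                    "rule-name": "1",
                    "object-locator": {"schema-name": "%", "table-name": "%"},
                    "rule-action": "include",
                }
            ]
        },
    )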
class DmsDeleteTaskOperator(BaseOperator):
"""
    Deletes an AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsDeleteTaskOperator`
:param replication_task_arn: Replication task ARN
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("replication_task_arn",)
template_ext: Sequence[str] = ()
template_fields_renderers: dict[str, str] = {}
def __init__(
self,
*,
replication_task_arn: str | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
"""
        Deletes an AWS DMS replication task from Airflow.
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.delete_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) has been deleted.", self.replication_task_arn)
class DmsDescribeTasksOperator(BaseOperator):
"""
Describes AWS DMS replication tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsDescribeTasksOperator`
:param describe_tasks_kwargs: Describe tasks command arguments
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("describe_tasks_kwargs",)
template_ext: Sequence[str] = ()
template_fields_renderers: dict[str, str] = {"describe_tasks_kwargs": "json"}
def __init__(
self,
*,
describe_tasks_kwargs: dict | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.describe_tasks_kwargs = describe_tasks_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: Context) -> tuple[str | None, list]:
"""
Describes AWS DMS replication tasks from Airflow.
:return: Marker and list of replication tasks
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
return dms_hook.describe_replication_tasks(**self.describe_tasks_kwargs)
class DmsStartTaskOperator(BaseOperator):
"""
    Starts an AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStartTaskOperator`
:param replication_task_arn: Replication task ARN
:param start_replication_task_type: Replication task start type (default='start-replication')
('start-replication'|'resume-processing'|'reload-target')
:param start_task_kwargs: Extra start replication task arguments
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = (
"replication_task_arn",
"start_replication_task_type",
"start_task_kwargs",
)
template_ext: Sequence[str] = ()
template_fields_renderers = {"start_task_kwargs": "json"}
def __init__(
self,
*,
replication_task_arn: str,
start_replication_task_type: str = "start-replication",
start_task_kwargs: dict | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.start_replication_task_type = start_replication_task_type
self.start_task_kwargs = start_task_kwargs or {}
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
"""
        Starts an AWS DMS replication task from Airflow.
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.start_replication_task(
replication_task_arn=self.replication_task_arn,
start_replication_task_type=self.start_replication_task_type,
**self.start_task_kwargs,
)
self.log.info("DMS replication task(%s) is starting.", self.replication_task_arn)
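# A minimal usage sketch (the task ARN expression is a placeholder; not part of the upstream
# module). Since replication_task_arn is templated, it is commonly pulled from the XCom of
# an upstream DmsCreateTaskOperator task.
def _example_dms_start_task():
    return DmsStartTaskOperator(
        task_id="start_replication_task",
        replication_task_arn="{{ ti.xcom_pull(task_ids='create_replication_task') }}",
        start_replication_task_type="start-replication",
    )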
class DmsStopTaskOperator(BaseOperator):
"""
    Stops an AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStopTaskOperator`
:param replication_task_arn: Replication task ARN
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
"""
template_fields: Sequence[str] = ("replication_task_arn",)
template_ext: Sequence[str] = ()
template_fields_renderers: dict[str, str] = {}
def __init__(
self,
*,
replication_task_arn: str | None = None,
aws_conn_id: str = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
self.aws_conn_id = aws_conn_id
def execute(self, context: Context):
"""
        Stops an AWS DMS replication task from Airflow.
"""
dms_hook = DmsHook(aws_conn_id=self.aws_conn_id)
dms_hook.stop_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) is stopping.", self.replication_task_arn)
| 10,720 | 35.715753 | 104 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/emr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import warnings
from typing import Any
from botocore.exceptions import WaiterError
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.emr import EmrContainerHook, EmrHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
from airflow.triggers.base import BaseTrigger, TriggerEvent
class EmrAddStepsTrigger(BaseTrigger):
"""
Asynchronously poll the boto3 API and wait for the steps to finish executing.
:param job_flow_id: The id of the job flow.
    :param step_ids: The ids of the steps being waited upon.
:param poll_interval: The amount of time in seconds to wait between attempts.
:param max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_flow_id: str,
step_ids: list[str],
aws_conn_id: str,
max_attempts: int | None,
poll_interval: int | None,
):
self.job_flow_id = job_flow_id
self.step_ids = step_ids
self.aws_conn_id = aws_conn_id
self.max_attempts = max_attempts
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.amazon.aws.triggers.emr.EmrAddStepsTrigger",
{
"job_flow_id": str(self.job_flow_id),
"step_ids": self.step_ids,
"poll_interval": str(self.poll_interval),
"max_attempts": str(self.max_attempts),
"aws_conn_id": str(self.aws_conn_id),
},
)
async def run(self):
self.hook = EmrHook(aws_conn_id=self.aws_conn_id)
async with self.hook.async_conn as client:
for step_id in self.step_ids:
attempt = 0
waiter = client.get_waiter("step_complete")
while attempt < int(self.max_attempts):
attempt += 1
try:
await waiter.wait(
ClusterId=self.job_flow_id,
StepId=step_id,
WaiterConfig={
"Delay": int(self.poll_interval),
"MaxAttempts": 1,
},
)
break
except WaiterError as error:
if "terminal failure" in str(error):
yield TriggerEvent(
{"status": "failure", "message": f"Step {step_id} failed: {error}"}
)
break
self.log.info(
"Status of step is %s - %s",
error.last_response["Step"]["Status"]["State"],
error.last_response["Step"]["Status"]["StateChangeReason"],
)
await asyncio.sleep(int(self.poll_interval))
if attempt >= int(self.max_attempts):
yield TriggerEvent({"status": "failure", "message": "Steps failed: max attempts reached"})
else:
yield TriggerEvent({"status": "success", "message": "Steps completed", "step_ids": self.step_ids})
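# A minimal sketch of how a deferrable operator might hand off to this trigger (method shown
# out of context; job flow and step ids are placeholders, not part of the upstream module).
# The callback name is an assumption for illustration.
def _example_defer_to_add_steps_trigger(operator: Any, job_flow_id: str, step_ids: list[str]) -> None:
    operator.defer(
        trigger=EmrAddStepsTrigger(
            job_flow_id=job_flow_id,
            step_ids=step_ids,
            aws_conn_id="aws_default",
            max_attempts=60,
            poll_interval=30,
        ),
        method_name="execute_complete",  # assumed callback on the deferring operator
    )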
class EmrCreateJobFlowTrigger(AwsBaseWaiterTrigger):
"""
Asynchronously poll the boto3 API and wait for the JobFlow to finish executing.
:param job_flow_id: The id of the job flow to wait for.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_flow_id: str,
poll_interval: int | None = None, # deprecated
max_attempts: int | None = None, # deprecated
aws_conn_id: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempts",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"job_flow_id": job_flow_id},
waiter_name="job_flow_waiting",
waiter_args={"ClusterId": job_flow_id},
failure_message="JobFlow creation failed",
status_message="JobFlow creation in progress",
status_queries=[
"Cluster.Status.State",
"Cluster.Status.StateChangeReason",
"Cluster.Status.ErrorDetails",
],
return_key="job_flow_id",
return_value=job_flow_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
class EmrTerminateJobFlowTrigger(AwsBaseWaiterTrigger):
"""
Asynchronously poll the boto3 API and wait for the JobFlow to finish terminating.
:param job_flow_id: ID of the EMR Job Flow to terminate
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_flow_id: str,
poll_interval: int | None = None, # deprecated
max_attempts: int | None = None, # deprecated
aws_conn_id: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempts",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"job_flow_id": job_flow_id},
waiter_name="job_flow_terminated",
waiter_args={"ClusterId": job_flow_id},
failure_message="JobFlow termination failed",
status_message="JobFlow termination in progress",
status_queries=[
"Cluster.Status.State",
"Cluster.Status.StateChangeReason",
"Cluster.Status.ErrorDetails",
],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
class EmrContainerTrigger(AwsBaseWaiterTrigger):
"""
    Poll for the status of an EMR Containers job run until it reaches a terminal state.
:param virtual_cluster_id: Reference Emr cluster id
:param job_id: job_id to check the state
:param aws_conn_id: Reference to AWS connection id
:param waiter_delay: polling period in seconds to check for the status
"""
def __init__(
self,
virtual_cluster_id: str,
job_id: str,
aws_conn_id: str = "aws_default",
poll_interval: int | None = None, # deprecated
waiter_delay: int = 30,
waiter_max_attempts: int = 600,
):
if poll_interval is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
super().__init__(
serialized_fields={"virtual_cluster_id": virtual_cluster_id, "job_id": job_id},
waiter_name="container_job_complete",
waiter_args={"id": job_id, "virtualClusterId": virtual_cluster_id},
failure_message="Job failed",
status_message="Job in progress",
status_queries=["jobRun.state", "jobRun.failureReason"],
return_key="job_id",
return_value=job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrContainerHook(self.aws_conn_id)
class EmrStepSensorTrigger(AwsBaseWaiterTrigger):
"""
    Poll for the status of an EMR step until it reaches a terminal state.
:param job_flow_id: job_flow_id which contains the step check the state of
:param step_id: step to check the state of
:param waiter_delay: polling period in seconds to check for the status
:param waiter_max_attempts: The maximum number of attempts to be made
:param aws_conn_id: Reference to AWS connection id
"""
def __init__(
self,
job_flow_id: str,
step_id: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str = "aws_default",
):
super().__init__(
serialized_fields={"job_flow_id": job_flow_id, "step_id": step_id},
waiter_name="step_wait_for_terminal",
waiter_args={"ClusterId": job_flow_id, "StepId": step_id},
failure_message=f"Error while waiting for step {step_id} to complete",
status_message=f"Step id: {step_id}, Step is still in non-terminal state",
status_queries=[
"Step.Status.State",
"Step.Status.FailureDetails",
"Step.Status.StateChangeReason",
],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(self.aws_conn_id)
| 11,320 | 38.583916 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from abc import abstractmethod
from typing import Any, AsyncIterator
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.utils.waiter_with_logging import async_wait
from airflow.triggers.base import BaseTrigger, TriggerEvent
class AwsBaseWaiterTrigger(BaseTrigger):
"""
Base class for all AWS Triggers that follow the "standard" model of just waiting on a waiter.
Subclasses need to implement the hook() method.
:param serialized_fields: Fields that are specific to the subclass trigger and need to be serialized
to be passed to the __init__ method on deserialization.
The conn id, region, and waiter delay & attempts are always serialized.
format: {<parameter_name>: <parameter_value>}
:param waiter_name: The name of the (possibly custom) boto waiter to use.
:param waiter_args: The arguments to pass to the waiter.
:param failure_message: The message to log if a failure state is reached.
:param status_message: The message logged when printing the status of the service.
:param status_queries: A list containing the JMESPath queries to retrieve status information from
the waiter response. See https://jmespath.org/tutorial.html
:param return_key: The key to use for the return_value in the TriggerEvent this emits on success.
Defaults to "value".
:param return_value: A value that'll be returned in the return_key field of the TriggerEvent.
Set to None if there is nothing to return.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials. To be used to build the hook.
:param region_name: The AWS region where the resources to watch are. To be used to build the hook.
"""
def __init__(
self,
*,
serialized_fields: dict[str, Any],
waiter_name: str,
waiter_args: dict[str, Any],
failure_message: str,
status_message: str,
status_queries: list[str],
return_key: str = "value",
return_value: Any,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None,
region_name: str | None = None,
):
        # parameters that should be hardcoded in the child's implementation
self.serialized_fields = serialized_fields
self.waiter_name = waiter_name
self.waiter_args = waiter_args
self.failure_message = failure_message
self.status_message = status_message
self.status_queries = status_queries
self.return_key = return_key
self.return_value = return_value
# parameters that should be passed directly from the child's parameters
self.waiter_delay = waiter_delay
self.attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region_name = region_name
def serialize(self) -> tuple[str, dict[str, Any]]:
# here we put together the "common" params,
# and whatever extras we got from the subclass in serialized_fields
params = dict(
{
"waiter_delay": self.waiter_delay,
"waiter_max_attempts": self.attempts,
"aws_conn_id": self.aws_conn_id,
},
**self.serialized_fields,
)
if self.region_name:
# if we serialize the None value from this, it breaks subclasses that don't have it in their ctor.
params["region_name"] = self.region_name
return (
# remember that self is an instance of the subclass here, not of this class.
self.__class__.__module__ + "." + self.__class__.__qualname__,
params,
)
@abstractmethod
def hook(self) -> AwsGenericHook:
"""Override in subclasses to return the right hook."""
...
async def run(self) -> AsyncIterator[TriggerEvent]:
hook = self.hook()
async with hook.async_conn as client:
waiter = hook.get_waiter(self.waiter_name, deferrable=True, client=client)
await async_wait(
waiter,
self.waiter_delay,
self.attempts,
self.waiter_args,
self.failure_message,
self.status_message,
self.status_queries,
)
yield TriggerEvent({"status": "success", self.return_key: self.return_value})
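# --- Illustrative sketch (not part of the provider API) ---------------------
# The class above only needs two things from a subclass: the waiter wiring
# passed to __init__ and a hook() returning the right AWS hook. The subclass
# below is hypothetical and relies on the standard boto3 S3 "bucket_exists"
# waiter; everything else follows the constructor documented above.
class _ExampleS3BucketActiveTrigger(AwsBaseWaiterTrigger):
    """Sketch: wait until an S3 bucket exists."""
    def __init__(
        self,
        bucket_name: str,
        waiter_delay: int = 30,
        waiter_max_attempts: int = 60,
        aws_conn_id: str | None = "aws_default",
    ):
        super().__init__(
            serialized_fields={"bucket_name": bucket_name},
            waiter_name="bucket_exists",  # plain boto3 waiter, no custom JSON config needed
            waiter_args={"Bucket": bucket_name},
            failure_message="Error while waiting for bucket",
            status_message="Bucket not available yet",
            status_queries=["ResponseMetadata.HTTPStatusCode"],
            return_value=bucket_name,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
            aws_conn_id=aws_conn_id,
        )
    def hook(self) -> AwsGenericHook:
        # Local import keeps the sketch self-contained.
        from airflow.providers.amazon.aws.hooks.s3 import S3Hook
        return S3Hook(aws_conn_id=self.aws_conn_id)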
| 5,403 | 40.251908 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/glue.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, AsyncIterator
from airflow.providers.amazon.aws.hooks.glue import GlueJobHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class GlueJobCompleteTrigger(BaseTrigger):
"""
Watches for a glue job, triggers when it finishes.
:param job_name: glue job name
:param run_id: the ID of the specific run to watch for that job
:param verbose: whether to print the job's logs in airflow logs or not
    :param aws_conn_id: The Airflow connection used for AWS credentials.
    :param job_poll_interval: polling period in seconds to check the job's status
"""
def __init__(
self,
job_name: str,
run_id: str,
verbose: bool,
aws_conn_id: str,
job_poll_interval: int | float,
):
super().__init__()
self.job_name = job_name
self.run_id = run_id
self.verbose = verbose
self.aws_conn_id = aws_conn_id
self.job_poll_interval = job_poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
# dynamically generate the fully qualified name of the class
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"job_name": self.job_name,
"run_id": self.run_id,
"verbose": str(self.verbose),
"aws_conn_id": self.aws_conn_id,
"job_poll_interval": self.job_poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
hook = GlueJobHook(aws_conn_id=self.aws_conn_id, job_poll_interval=self.job_poll_interval)
await hook.async_job_completion(self.job_name, self.run_id, self.verbose)
yield TriggerEvent({"status": "success", "message": "Job done", "value": self.run_id})
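# --- Illustrative sketch (not part of the provider API) ---------------------
# serialize() is what lets the triggerer process re-create this trigger: it
# returns the classpath plus keyword arguments. The helper below demonstrates
# that round trip; the job and run identifiers are made up.
def _example_glue_trigger_roundtrip() -> GlueJobCompleteTrigger:
    """Sketch: serialize a trigger and rebuild it the way the triggerer does."""
    from airflow.utils.module_loading import import_string
    trigger = GlueJobCompleteTrigger(
        job_name="example_glue_job",
        run_id="jr_0123456789abcdef",
        verbose=False,
        aws_conn_id="aws_default",
        job_poll_interval=10,
    )
    classpath, kwargs = trigger.serialize()
    # serialize() stores verbose as a string (see above), so coerce it back.
    kwargs["verbose"] = kwargs["verbose"] == "True"
    return import_string(classpath)(**kwargs)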
| 2,567 | 36.764706 | 98 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/sagemaker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import Any
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.utils.waiter_with_logging import async_wait
from airflow.triggers.base import BaseTrigger, TriggerEvent
class SageMakerTrigger(BaseTrigger):
"""
SageMakerTrigger is fired as deferred class with params to run the task in triggerer.
:param job_name: name of the job to check status
    :param job_type: Type of the SageMaker job: Training, Transform, Processing, Tuning or Endpoint
    :param poke_interval: polling period in seconds to check for the status
    :param max_attempts: Number of times to poll for the job state before giving up,
        defaults to 480.
:param aws_conn_id: AWS connection ID for sagemaker
"""
def __init__(
self,
job_name: str,
job_type: str,
poke_interval: int = 30,
max_attempts: int = 480,
aws_conn_id: str = "aws_default",
):
super().__init__()
self.job_name = job_name
self.job_type = job_type
self.poke_interval = poke_interval
self.max_attempts = max_attempts
self.aws_conn_id = aws_conn_id
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes SagemakerTrigger arguments and classpath."""
return (
"airflow.providers.amazon.aws.triggers.sagemaker.SageMakerTrigger",
{
"job_name": self.job_name,
"job_type": self.job_type,
"poke_interval": self.poke_interval,
"max_attempts": self.max_attempts,
"aws_conn_id": self.aws_conn_id,
},
)
@cached_property
def hook(self) -> SageMakerHook:
return SageMakerHook(aws_conn_id=self.aws_conn_id)
@staticmethod
def _get_job_type_waiter(job_type: str) -> str:
return {
"training": "TrainingJobComplete",
"transform": "TransformJobComplete",
"processing": "ProcessingJobComplete",
"tuning": "TuningJobComplete",
"endpoint": "endpoint_in_service", # this one is provided by boto
}[job_type.lower()]
@staticmethod
def _get_waiter_arg_name(job_type: str) -> str:
return {
"training": "TrainingJobName",
"transform": "TransformJobName",
"processing": "ProcessingJobName",
"tuning": "HyperParameterTuningJobName",
"endpoint": "EndpointName",
}[job_type.lower()]
@staticmethod
def _get_response_status_key(job_type: str) -> str:
return {
"training": "TrainingJobStatus",
"transform": "TransformJobStatus",
"processing": "ProcessingJobStatus",
"tuning": "HyperParameterTuningJobStatus",
"endpoint": "EndpointStatus",
}[job_type.lower()]
async def run(self):
self.log.info("job name is %s and job type is %s", self.job_name, self.job_type)
async with self.hook.async_conn as client:
waiter = self.hook.get_waiter(
self._get_job_type_waiter(self.job_type), deferrable=True, client=client
)
await async_wait(
waiter=waiter,
waiter_delay=self.poke_interval,
waiter_max_attempts=self.max_attempts,
args={self._get_waiter_arg_name(self.job_type): self.job_name},
failure_message=f"Error while waiting for {self.job_type} job",
status_message=f"{self.job_type} job not done yet",
status_args=[self._get_response_status_key(self.job_type)],
)
yield TriggerEvent({"status": "success", "message": "Job completed."})
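# --- Illustrative usage sketch (not part of the provider API) ---------------
# job_type selects both the waiter and the request/response field names via
# the static mappings above. A deferrable operator would typically hand off
# like this; the operator instance and job name below are hypothetical.
def _example_defer_on_training_job(operator, job_name: str) -> None:
    """Sketch: call from an operator's execute() after starting a training job."""
    operator.defer(
        trigger=SageMakerTrigger(
            job_name=job_name,
            job_type="training",  # resolves to the TrainingJobComplete waiter
            poke_interval=30,
            max_attempts=480,
            aws_conn_id="aws_default",
        ),
        method_name="execute_complete",
    )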
| 4,626 | 38.211864 | 100 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/redshift_cluster.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.redshift_cluster import RedshiftHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
class RedshiftCreateClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftCreateClusterOperator.
The trigger will asynchronously poll the boto3 API and wait for the
Redshift cluster to be in the `available` state.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_identifier: str,
poll_interval: int | None = None,
max_attempt: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 15,
waiter_max_attempts: int = 999999,
):
if poll_interval is not None or max_attempt is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempt.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempt or waiter_max_attempts
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="cluster_available",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Error while creating the redshift cluster",
status_message="Redshift cluster creation in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
class RedshiftPauseClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftPauseClusterOperator.
The trigger will asynchronously poll the boto3 API and wait for the
Redshift cluster to be in the `paused` state.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_identifier: str,
poll_interval: int | None = None,
max_attempts: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 15,
waiter_max_attempts: int = 999999,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempt.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="cluster_paused",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Error while pausing the redshift cluster",
status_message="Redshift cluster pausing in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
class RedshiftCreateClusterSnapshotTrigger(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftCreateClusterSnapshotOperator.
The trigger will asynchronously poll the boto3 API and wait for the
Redshift cluster snapshot to be in the `available` state.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_identifier: str,
poll_interval: int | None = None,
max_attempts: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 15,
waiter_max_attempts: int = 999999,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempt.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="snapshot_available",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Create Cluster Snapshot Failed",
status_message="Redshift Cluster Snapshot in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
class RedshiftResumeClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftResumeClusterOperator.
The trigger will asynchronously poll the boto3 API and wait for the
Redshift cluster to be in the `available` state.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_identifier: str,
poll_interval: int | None = None,
max_attempts: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 15,
waiter_max_attempts: int = 999999,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempt.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="cluster_resumed",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Resume Cluster Snapshot Failed",
status_message="Redshift Cluster resuming in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
class RedshiftDeleteClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftDeleteClusterOperator.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param waiter_delay: The amount of time in seconds to wait between attempts.
"""
def __init__(
self,
cluster_identifier: str,
poll_interval: int | None = None,
max_attempts: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 30,
waiter_max_attempts: int = 30,
):
if poll_interval is not None or max_attempts is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval "
"and waiter_max_attempts instead of max_attempt.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
waiter_max_attempts = max_attempts or waiter_max_attempts
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="cluster_deleted",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Delete Cluster Failed",
status_message="Redshift Cluster deletion in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
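# --- Illustrative usage sketch (not part of the provider API) ---------------
# poll_interval/max_attempt(s) are kept only for backwards compatibility and
# emit a deprecation warning; new code should pass waiter_delay and
# waiter_max_attempts directly. The cluster identifier below is made up.
def _example_pause_cluster_trigger() -> RedshiftPauseClusterTrigger:
    """Sketch: build the trigger with the non-deprecated parameters."""
    return RedshiftPauseClusterTrigger(
        cluster_identifier="example-redshift-cluster",
        aws_conn_id="aws_default",
        waiter_delay=30,  # seconds between polls
        waiter_max_attempts=120,  # give up after roughly an hour
    )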
| 10,704 | 39.858779 | 80 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/athena.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.providers.amazon.aws.hooks.athena import AthenaHook
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
class AthenaTrigger(AwsBaseWaiterTrigger):
"""
    Trigger for AthenaOperator.
    The trigger will asynchronously poll the boto3 API and wait for the
    Athena query execution to reach a terminal state.
:param query_execution_id: ID of the Athena query execution to watch
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
query_execution_id: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
):
super().__init__(
serialized_fields={"query_execution_id": query_execution_id},
waiter_name="query_complete",
waiter_args={"QueryExecutionId": query_execution_id},
failure_message=f"Error while waiting for query {query_execution_id} to complete",
status_message=f"Query execution id: {query_execution_id}",
status_queries=["QueryExecution.Status"],
return_value=query_execution_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return AthenaHook(self.aws_conn_id)
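# --- Illustrative usage sketch (not part of the provider API) ---------------
# After submitting a query, a deferrable operator can wait for it without
# holding a worker slot. The operator instance and query execution id below
# are hypothetical; only AthenaTrigger and defer() come from Airflow.
def _example_defer_on_query(operator, query_execution_id: str) -> None:
    """Sketch: defer until the Athena query execution finishes."""
    operator.defer(
        trigger=AthenaTrigger(
            query_execution_id=query_execution_id,
            waiter_delay=30,
            waiter_max_attempts=60,
            aws_conn_id="aws_default",
        ),
        method_name="execute_complete",  # resumed with the query id under "value"
    )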
| 2,427 | 40.152542 | 94 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from functools import cached_property
from typing import Any
from botocore.exceptions import WaiterError
from deprecated import deprecated
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
from airflow.triggers.base import BaseTrigger, TriggerEvent
@deprecated(reason="use BatchJobTrigger instead")
class BatchOperatorTrigger(BaseTrigger):
"""
Asynchronously poll the boto3 API and wait for the Batch job to be in the `SUCCEEDED` state.
    :param job_id: A unique identifier for the Batch job.
:param max_retries: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: region name to use in AWS Hook
:param poll_interval: The amount of time in seconds to wait between attempts.
"""
def __init__(
self,
job_id: str | None = None,
max_retries: int = 10,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
poll_interval: int = 30,
):
super().__init__()
self.job_id = job_id
self.max_retries = max_retries
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BatchOperatorTrigger arguments and classpath."""
return (
"airflow.providers.amazon.aws.triggers.batch.BatchOperatorTrigger",
{
"job_id": self.job_id,
"max_retries": self.max_retries,
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"poll_interval": self.poll_interval,
},
)
@cached_property
def hook(self) -> BatchClientHook:
return BatchClientHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
async def run(self):
async with self.hook.async_conn as client:
waiter = self.hook.get_waiter("batch_job_complete", deferrable=True, client=client)
attempt = 0
while attempt < self.max_retries:
attempt = attempt + 1
try:
await waiter.wait(
jobs=[self.job_id],
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": 1,
},
)
break
except WaiterError as error:
if "terminal failure" in str(error):
yield TriggerEvent(
{"status": "failure", "message": f"Delete Cluster Failed: {error}"}
)
break
self.log.info(
"Job status is %s. Retrying attempt %s/%s",
error.last_response["jobs"][0]["status"],
attempt,
self.max_retries,
)
await asyncio.sleep(int(self.poll_interval))
if attempt >= self.max_retries:
yield TriggerEvent({"status": "failure", "message": "Job Failed - max attempts reached."})
else:
yield TriggerEvent({"status": "success", "job_id": self.job_id})
@deprecated(reason="use BatchJobTrigger instead")
class BatchSensorTrigger(BaseTrigger):
"""
Checks for the status of a submitted job_id to AWS Batch until it reaches a failure or a success state.
BatchSensorTrigger is fired as deferred class with params to poll the job state in Triggerer.
    :param job_id: the job ID to poll for completion
:param region_name: AWS region name to use
Override the region_name in connection (if provided)
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
:param poke_interval: polling period in seconds to check for the status of the job
"""
def __init__(
self,
job_id: str,
region_name: str | None,
aws_conn_id: str | None = "aws_default",
poke_interval: float = 5,
):
super().__init__()
self.job_id = job_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.poke_interval = poke_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BatchSensorTrigger arguments and classpath."""
return (
"airflow.providers.amazon.aws.triggers.batch.BatchSensorTrigger",
{
"job_id": self.job_id,
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"poke_interval": self.poke_interval,
},
)
@cached_property
def hook(self) -> BatchClientHook:
return BatchClientHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
async def run(self):
"""
Make async connection using aiobotocore library to AWS Batch, periodically poll for the job status.
        The statuses that indicate job completion are 'SUCCEEDED' and 'FAILED'.
"""
async with self.hook.async_conn as client:
waiter = self.hook.get_waiter("batch_job_complete", deferrable=True, client=client)
attempt = 0
while True:
attempt = attempt + 1
try:
await waiter.wait(
jobs=[self.job_id],
WaiterConfig={
"Delay": int(self.poke_interval),
"MaxAttempts": 1,
},
)
break
except WaiterError as error:
if "error" in str(error):
yield TriggerEvent({"status": "failure", "message": f"Job Failed: {error}"})
break
self.log.info(
"Job response is %s. Retrying attempt %s",
error.last_response["Error"]["Message"],
attempt,
)
await asyncio.sleep(int(self.poke_interval))
yield TriggerEvent(
{
"status": "success",
"job_id": self.job_id,
"message": f"Job {self.job_id} Succeeded",
}
)
class BatchJobTrigger(AwsBaseWaiterTrigger):
"""
Checks for the status of a submitted job_id to AWS Batch until it reaches a failure or a success state.
    :param job_id: the job ID to poll for completion
:param region_name: AWS region name to use
Override the region_name in connection (if provided)
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
:param waiter_delay: polling period in seconds to check for the status of the job
:param waiter_max_attempts: The maximum number of attempts to be made.
"""
def __init__(
self,
job_id: str | None,
region_name: str | None,
aws_conn_id: str | None = "aws_default",
waiter_delay: int = 5,
waiter_max_attempts: int = 720,
):
super().__init__(
serialized_fields={"job_id": job_id},
waiter_name="batch_job_complete",
waiter_args={"jobs": [job_id]},
failure_message=f"Failure while running batch job {job_id}",
status_message=f"Batch job {job_id} not ready yet",
status_queries=["jobs[].status", "computeEnvironments[].statusReason"],
return_key="job_id",
return_value=job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return BatchClientHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class BatchCreateComputeEnvironmentTrigger(AwsBaseWaiterTrigger):
"""
Asynchronously poll the boto3 API and wait for the compute environment to be ready.
:param compute_env_arn: The ARN of the compute env.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: region name to use in AWS Hook
:param waiter_delay: The amount of time in seconds to wait between attempts.
"""
def __init__(
self,
compute_env_arn: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 10,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
):
super().__init__(
serialized_fields={"compute_env_arn": compute_env_arn},
waiter_name="compute_env_ready",
waiter_args={"computeEnvironments": [compute_env_arn]},
failure_message="Failure while creating Compute Environment",
status_message="Compute Environment not ready yet",
status_queries=["computeEnvironments[].status", "computeEnvironments[].statusReason"],
return_value=compute_env_arn,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return BatchClientHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
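# --- Illustrative usage sketch (not part of the provider API) ---------------
# BatchOperatorTrigger and BatchSensorTrigger above are deprecated in favour
# of BatchJobTrigger, which covers both use cases through the waiter-based
# base class. The operator instance and job id below are hypothetical.
def _example_defer_on_batch_job(operator, job_id: str) -> None:
    """Sketch: defer until the submitted Batch job reaches a terminal state."""
    operator.defer(
        trigger=BatchJobTrigger(
            job_id=job_id,
            region_name=None,  # fall back to the connection's region
            aws_conn_id="aws_default",
            waiter_delay=30,
            waiter_max_attempts=720,
        ),
        method_name="execute_complete",
    )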
| 10,706 | 38.509225 | 107 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/ecs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator
from botocore.exceptions import ClientError, WaiterError
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.ecs import EcsHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher
from airflow.triggers.base import BaseTrigger, TriggerEvent
class ClusterActiveTrigger(AwsBaseWaiterTrigger):
"""
Polls the status of a cluster until it's active.
:param cluster_arn: ARN of the cluster to watch.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The number of times to ping for status.
Will fail after that many unsuccessful attempts.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: The AWS region where the cluster is located.
"""
def __init__(
self,
cluster_arn: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None,
region_name: str | None,
):
super().__init__(
serialized_fields={"cluster_arn": cluster_arn},
waiter_name="cluster_active",
waiter_args={"clusters": [cluster_arn]},
failure_message="Failure while waiting for cluster to be available",
status_message="Cluster is not ready yet",
status_queries=["clusters[].status", "failures"],
return_key="arn",
return_value=cluster_arn,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EcsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class ClusterInactiveTrigger(AwsBaseWaiterTrigger):
"""
Polls the status of a cluster until it's inactive.
:param cluster_arn: ARN of the cluster to watch.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The number of times to ping for status.
Will fail after that many unsuccessful attempts.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: The AWS region where the cluster is located.
"""
def __init__(
self,
cluster_arn: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None,
region_name: str | None,
):
super().__init__(
serialized_fields={"cluster_arn": cluster_arn},
waiter_name="cluster_inactive",
waiter_args={"clusters": [cluster_arn]},
failure_message="Failure while waiting for cluster to be deactivated",
status_message="Cluster deactivation is not done yet",
status_queries=["clusters[].status", "failures"],
return_value=cluster_arn,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EcsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class TaskDoneTrigger(BaseTrigger):
"""
Waits for an ECS task to be done, while eventually polling logs.
:param cluster: short name or full ARN of the cluster where the task is running.
:param task_arn: ARN of the task to watch.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The number of times to ping for status.
Will fail after that many unsuccessful attempts.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region: The AWS region where the cluster is located.
"""
def __init__(
self,
cluster: str,
task_arn: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None,
region: str | None,
log_group: str | None = None,
log_stream: str | None = None,
):
self.cluster = cluster
self.task_arn = task_arn
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region = region
self.log_group = log_group
self.log_stream = log_stream
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"cluster": self.cluster,
"task_arn": self.task_arn,
"waiter_delay": self.waiter_delay,
"waiter_max_attempts": self.waiter_max_attempts,
"aws_conn_id": self.aws_conn_id,
"region": self.region,
"log_group": self.log_group,
"log_stream": self.log_stream,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
# fmt: off
async with EcsHook(aws_conn_id=self.aws_conn_id, region_name=self.region).async_conn as ecs_client,\
AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.region).async_conn as logs_client:
# fmt: on
waiter = ecs_client.get_waiter("tasks_stopped")
logs_token = None
while self.waiter_max_attempts >= 1:
self.waiter_max_attempts = self.waiter_max_attempts - 1
try:
await waiter.wait(
cluster=self.cluster, tasks=[self.task_arn], WaiterConfig={"MaxAttempts": 1}
)
break # we reach this point only if the waiter met a success criteria
except WaiterError as error:
if "terminal failure" in str(error):
raise
self.log.info("Status of the task is %s", error.last_response["tasks"][0]["lastStatus"])
await asyncio.sleep(int(self.waiter_delay))
finally:
if self.log_group and self.log_stream:
logs_token = await self._forward_logs(logs_client, logs_token)
yield TriggerEvent({"status": "success", "task_arn": self.task_arn})
async def _forward_logs(self, logs_client, next_token: str | None = None) -> str | None:
"""
Reads logs from the cloudwatch stream and prints them to the task logs.
:return: the token to pass to the next iteration to resume where we started.
"""
while True:
if next_token is not None:
token_arg: dict[str, str] = {"nextToken": next_token}
else:
token_arg = {}
try:
response = await logs_client.get_log_events(
logGroupName=self.log_group,
logStreamName=self.log_stream,
startFromHead=True,
**token_arg,
)
except ClientError as ce:
if ce.response["Error"]["Code"] == "ResourceNotFoundException":
self.log.info(
"Tried to get logs from stream %s in group %s but it didn't exist (yet). "
"Will try again.",
self.log_stream,
self.log_group,
)
return None
raise
events = response["events"]
for log_event in events:
self.log.info(AwsTaskLogFetcher.event_to_str(log_event))
if len(events) == 0 or next_token == response["nextForwardToken"]:
return response["nextForwardToken"]
next_token = response["nextForwardToken"]
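# --- Illustrative usage sketch (not part of the provider API) ---------------
# When log_group and log_stream are given, TaskDoneTrigger forwards the
# task's CloudWatch logs into the Airflow task log while it waits. All
# identifiers below are hypothetical placeholders.
def _example_task_done_trigger() -> TaskDoneTrigger:
    """Sketch: watch an ECS task and stream its awslogs output while waiting."""
    return TaskDoneTrigger(
        cluster="example-cluster",
        task_arn="arn:aws:ecs:us-east-1:111122223333:task/example-cluster/abc123",
        waiter_delay=15,
        waiter_max_attempts=1000,
        aws_conn_id="aws_default",
        region="us-east-1",
        log_group="/ecs/example-task",
        log_stream="ecs/example-container/abc123",
    )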
| 8,898 | 39.266968 | 109 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/ec2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from functools import cached_property
from typing import Any
from airflow.providers.amazon.aws.hooks.ec2 import EC2Hook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class EC2StateSensorTrigger(BaseTrigger):
"""
Poll the EC2 instance and yield a TriggerEvent once the state of the instance matches the target_state.
:param instance_id: id of the AWS EC2 instance
:param target_state: target state of instance
:param aws_conn_id: aws connection to use
:param region_name: (optional) aws region name associated with the client
:param poll_interval: number of seconds to wait before attempting the next poll
"""
def __init__(
self,
instance_id: str,
target_state: str,
aws_conn_id: str = "aws_default",
region_name: str | None = None,
poll_interval: int = 60,
):
self.instance_id = instance_id
self.target_state = target_state
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.amazon.aws.triggers.ec2.EC2StateSensorTrigger",
{
"instance_id": self.instance_id,
"target_state": self.target_state,
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"poll_interval": self.poll_interval,
},
)
@cached_property
def hook(self):
return EC2Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name, api_type="client_type")
async def run(self):
while True:
instance_state = await self.hook.get_instance_state_async(instance_id=self.instance_id)
self.log.info("instance state: %s", instance_state)
if instance_state == self.target_state:
yield TriggerEvent({"status": "success", "message": "target state met"})
break
else:
await asyncio.sleep(self.poll_interval)
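# --- Illustrative sketch (not part of the provider API) ---------------------
# run() is an async generator, so the trigger can also be exercised outside a
# triggerer, e.g. in a local test with valid AWS credentials. The instance id
# below is a made-up placeholder.
async def _example_wait_for_running_state() -> None:
    """Sketch: poll an instance until it is 'running' and print the event."""
    trigger = EC2StateSensorTrigger(
        instance_id="i-0123456789abcdef0",
        target_state="running",
        aws_conn_id="aws_default",
        poll_interval=30,
    )
    async for event in trigger.run():
        print(event.payload)  # {"status": "success", "message": "target state met"}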
| 2,942 | 37.220779 | 107 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/glue_crawler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
class GlueCrawlerCompleteTrigger(AwsBaseWaiterTrigger):
"""
Watches for a glue crawl, triggers when it finishes.
:param crawler_name: name of the crawler to watch
    :param waiter_delay: The amount of time in seconds to wait between attempts.
    :param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
crawler_name: str,
poll_interval: int | None = None,
aws_conn_id: str = "aws_default",
waiter_delay: int = 5,
waiter_max_attempts: int = 1500,
):
if poll_interval is not None:
warnings.warn(
"please use waiter_delay instead of poll_interval.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
waiter_delay = poll_interval or waiter_delay
super().__init__(
serialized_fields={"crawler_name": crawler_name},
waiter_name="crawler_ready",
waiter_args={"Name": crawler_name},
failure_message="Error while waiting for glue crawl to complete",
status_message="Status of glue crawl is",
status_queries=["Crawler.State", "Crawler.LastCrawl"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return GlueCrawlerHook(aws_conn_id=self.aws_conn_id)
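# --- Illustrative usage sketch (not part of the provider API) ---------------
# A deferrable sensor or operator would hand off to this trigger right after
# starting the crawl; the sensor instance and crawler name are hypothetical.
def _example_defer_on_crawl(sensor, crawler_name: str) -> None:
    """Sketch: defer until the named Glue crawler finishes its crawl."""
    sensor.defer(
        trigger=GlueCrawlerCompleteTrigger(
            crawler_name=crawler_name,
            aws_conn_id="aws_default",
            waiter_delay=20,
            waiter_max_attempts=300,
        ),
        method_name="execute_complete",
    )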
| 2,622 | 38.742424 | 81 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/eks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Any
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.hooks.eks import EksHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
from airflow.providers.amazon.aws.utils.waiter_with_logging import async_wait
from airflow.triggers.base import TriggerEvent
class EksCreateClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for EksCreateClusterOperator.
The trigger will asynchronously wait for the cluster to be created.
:param cluster_name: The name of the EKS cluster
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: Which AWS region the connection should use.
If this is None or empty then the default boto3 behaviour is used.
"""
def __init__(
self,
cluster_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
):
super().__init__(
serialized_fields={"cluster_name": cluster_name, "region_name": region_name},
waiter_name="cluster_active",
waiter_args={"name": cluster_name},
failure_message="Error checking Eks cluster",
status_message="Eks cluster status is",
status_queries=["cluster.status"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class EksDeleteClusterTrigger(AwsBaseWaiterTrigger):
"""
Trigger for EksDeleteClusterOperator.
The trigger will asynchronously wait for the cluster to be deleted. If there are
any nodegroups or fargate profiles associated with the cluster, they will be deleted
before the cluster is deleted.
:param cluster_name: The name of the EKS cluster
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
:param region_name: Which AWS region the connection should use.
If this is None or empty then the default boto3 behaviour is used.
:param force_delete_compute: If True, any nodegroups or fargate profiles associated
with the cluster will be deleted before the cluster is deleted.
"""
def __init__(
self,
cluster_name,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
force_delete_compute: bool,
):
self.cluster_name = cluster_name
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.force_delete_compute = force_delete_compute
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"cluster_name": self.cluster_name,
"waiter_delay": str(self.waiter_delay),
"waiter_max_attempts": str(self.waiter_max_attempts),
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"force_delete_compute": self.force_delete_compute,
},
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
async def run(self):
async with self.hook.async_conn as client:
waiter = client.get_waiter("cluster_deleted")
if self.force_delete_compute:
await self.delete_any_nodegroups(client=client)
await self.delete_any_fargate_profiles(client=client)
await client.delete_cluster(name=self.cluster_name)
await async_wait(
waiter=waiter,
waiter_delay=int(self.waiter_delay),
waiter_max_attempts=int(self.waiter_max_attempts),
args={"name": self.cluster_name},
failure_message="Error deleting cluster",
status_message="Status of cluster is",
status_args=["cluster.status"],
)
yield TriggerEvent({"status": "deleted"})
async def delete_any_nodegroups(self, client) -> None:
"""
Deletes all EKS Nodegroups for a provided Amazon EKS Cluster.
All the EKS Nodegroups are deleted simultaneously. We wait for
all Nodegroups to be deleted before returning.
"""
nodegroups = await client.list_nodegroups(clusterName=self.cluster_name)
if nodegroups.get("nodegroups", None):
self.log.info("Deleting nodegroups")
# ignoring attr-defined here because aws_base hook defines get_waiter for all hooks
waiter = self.hook.get_waiter( # type: ignore[attr-defined]
"all_nodegroups_deleted", deferrable=True, client=client
)
for group in nodegroups["nodegroups"]:
await client.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=group)
await async_wait(
waiter=waiter,
waiter_delay=int(self.waiter_delay),
waiter_max_attempts=int(self.waiter_max_attempts),
args={"clusterName": self.cluster_name},
failure_message=f"Error deleting nodegroup for cluster {self.cluster_name}",
status_message="Deleting nodegroups associated with the cluster",
status_args=["nodegroups"],
)
self.log.info("All nodegroups deleted")
else:
self.log.info("No nodegroups associated with cluster %s", self.cluster_name)
async def delete_any_fargate_profiles(self, client) -> None:
"""
Deletes all EKS Fargate profiles for a provided Amazon EKS Cluster.
EKS Fargate profiles must be deleted one at a time, so we must wait
for one to be deleted before sending the next delete command.
"""
fargate_profiles = await client.list_fargate_profiles(clusterName=self.cluster_name)
if fargate_profiles.get("fargateProfileNames"):
self.log.info("Waiting for Fargate profiles to delete. This will take some time.")
for profile in fargate_profiles["fargateProfileNames"]:
await client.delete_fargate_profile(clusterName=self.cluster_name, fargateProfileName=profile)
await async_wait(
waiter=client.get_waiter("fargate_profile_deleted"),
waiter_delay=int(self.waiter_delay),
waiter_max_attempts=int(self.waiter_max_attempts),
args={"clusterName": self.cluster_name, "fargateProfileName": profile},
failure_message=f"Error deleting fargate profile for cluster {self.cluster_name}",
status_message="Status of fargate profile is",
status_args=["fargateProfile.status"],
)
self.log.info("All Fargate profiles deleted")
else:
self.log.info(f"No Fargate profiles associated with cluster {self.cluster_name}")
class EksCreateFargateProfileTrigger(AwsBaseWaiterTrigger):
"""
Asynchronously wait for the fargate profile to be created.
:param cluster_name: The name of the EKS cluster
:param fargate_profile_name: The name of the fargate profile
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_name: str,
fargate_profile_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region: str | None = None,
region_name: str | None = None,
):
if region is not None:
warnings.warn(
"please use region_name param instead of region",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
region_name = region
super().__init__(
serialized_fields={"cluster_name": cluster_name, "fargate_profile_name": fargate_profile_name},
waiter_name="fargate_profile_active",
waiter_args={"clusterName": cluster_name, "fargateProfileName": fargate_profile_name},
failure_message="Failure while creating Fargate profile",
status_message="Fargate profile not created yet",
status_queries=["fargateProfile.status"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class EksDeleteFargateProfileTrigger(AwsBaseWaiterTrigger):
"""
Asynchronously wait for the fargate profile to be deleted.
:param cluster_name: The name of the EKS cluster
:param fargate_profile_name: The name of the fargate profile
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_name: str,
fargate_profile_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region: str | None = None,
region_name: str | None = None,
):
if region is not None:
warnings.warn(
"please use region_name param instead of region",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
region_name = region
super().__init__(
serialized_fields={"cluster_name": cluster_name, "fargate_profile_name": fargate_profile_name},
waiter_name="fargate_profile_deleted",
waiter_args={"clusterName": cluster_name, "fargateProfileName": fargate_profile_name},
failure_message="Failure while deleting Fargate profile",
status_message="Fargate profile not deleted yet",
status_queries=["fargateProfile.status"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class EksCreateNodegroupTrigger(AwsBaseWaiterTrigger):
"""
Trigger for EksCreateNodegroupOperator.
The trigger will asynchronously poll the boto3 API and wait for the
nodegroup to be in the state specified by the waiter.
    :param cluster_name: The name of the EKS cluster associated with the node group.
    :param nodegroup_name: The name of the nodegroup to check.
    :param waiter_delay: The amount of time in seconds to wait between attempts.
    :param waiter_max_attempts: The maximum number of attempts to be made.
    :param aws_conn_id: The Airflow connection used for AWS credentials.
    :param region_name: Which AWS region the connection should use. (templated)
        If this is None or empty then the default boto3 behaviour is used.
"""
def __init__(
self,
cluster_name: str,
nodegroup_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
):
super().__init__(
serialized_fields={
"cluster_name": cluster_name,
"nodegroup_name": nodegroup_name,
"region_name": region_name,
},
waiter_name="nodegroup_active",
waiter_args={"clusterName": cluster_name, "nodegroupName": nodegroup_name},
failure_message="Error creating nodegroup",
status_message="Nodegroup status is",
status_queries=["nodegroup.status", "nodegroup.health.issues"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
class EksDeleteNodegroupTrigger(AwsBaseWaiterTrigger):
"""
Trigger for EksDeleteNodegroupOperator.
The trigger will asynchronously poll the boto3 API and wait for the
nodegroup to be in the state specified by the waiter.
    :param cluster_name: The name of the EKS cluster associated with the node group.
    :param nodegroup_name: The name of the nodegroup to check.
    :param waiter_delay: The amount of time in seconds to wait between attempts.
    :param waiter_max_attempts: The maximum number of attempts to be made.
    :param aws_conn_id: The Airflow connection used for AWS credentials.
    :param region_name: Which AWS region the connection should use. (templated)
        If this is None or empty then the default boto3 behaviour is used.
"""
def __init__(
self,
cluster_name: str,
nodegroup_name: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
):
super().__init__(
serialized_fields={"cluster_name": cluster_name, "nodegroup_name": nodegroup_name},
waiter_name="nodegroup_deleted",
waiter_args={"clusterName": cluster_name, "nodegroupName": nodegroup_name},
failure_message="Error deleting nodegroup",
status_message="Nodegroup status is",
status_queries=["nodegroup.status", "nodegroup.health.issues"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
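# --- Illustrative usage sketch (not part of the provider API) ---------------
# EksDeleteClusterTrigger overrides run() rather than relying purely on the
# base waiter: with force_delete_compute=True it first removes nodegroups and
# Fargate profiles, then deletes the cluster. The names below are made up.
def _example_delete_cluster_trigger() -> EksDeleteClusterTrigger:
    """Sketch: delete a cluster together with its attached compute."""
    return EksDeleteClusterTrigger(
        cluster_name="example-eks-cluster",
        waiter_delay=30,
        waiter_max_attempts=60,
        aws_conn_id="aws_default",
        region_name="us-east-1",
        force_delete_compute=True,
    )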
| 16,029 | 41.407407 | 110 | py |
airflow | airflow-main/airflow/providers/amazon/aws/triggers/rds.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from airflow.providers.amazon.aws.hooks.rds import RdsHook
from airflow.providers.amazon.aws.utils.waiter_with_logging import async_wait
from airflow.triggers.base import BaseTrigger, TriggerEvent
class RdsDbInstanceTrigger(BaseTrigger):
"""
Trigger for RdsCreateDbInstanceOperator and RdsDeleteDbInstanceOperator.
The trigger will asynchronously poll the boto3 API and wait for the
DB instance to be in the state specified by the waiter.
:param waiter_name: Name of the waiter to use, for instance 'db_instance_available'
or 'db_instance_deleted'.
:param db_instance_identifier: The DB instance identifier for the DB instance to be polled.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
    :param region_name: AWS region where the DB instance is located.
:param response: The response from the RdsHook, to be passed back to the operator.
"""
def __init__(
self,
waiter_name: str,
db_instance_identifier: str,
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str,
region_name: str | None,
response: dict[str, Any],
):
self.db_instance_identifier = db_instance_identifier
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.waiter_name = waiter_name
self.response = response
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
# dynamically generate the fully qualified name of the class
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"db_instance_identifier": self.db_instance_identifier,
"waiter_delay": str(self.waiter_delay),
"waiter_max_attempts": str(self.waiter_max_attempts),
"aws_conn_id": self.aws_conn_id,
"region_name": self.region_name,
"waiter_name": self.waiter_name,
"response": self.response,
},
)
async def run(self):
self.hook = RdsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
async with self.hook.async_conn as client:
waiter = client.get_waiter(self.waiter_name)
await async_wait(
waiter=waiter,
waiter_delay=int(self.waiter_delay),
waiter_max_attempts=int(self.waiter_max_attempts),
args={"DBInstanceIdentifier": self.db_instance_identifier},
failure_message="Error checking DB Instance status",
status_message="DB instance status is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
yield TriggerEvent({"status": "success", "response": self.response})
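# --- Illustrative usage sketch (not part of the provider API) ---------------
# The same trigger class serves both create and delete by parameterising the
# waiter name, and it carries the original API response back to the operator
# inside the TriggerEvent. The identifiers below are hypothetical.
def _example_db_instance_available_trigger(create_response: dict) -> RdsDbInstanceTrigger:
    """Sketch: wait for a freshly created DB instance to become available."""
    return RdsDbInstanceTrigger(
        waiter_name="db_instance_available",  # standard boto3 RDS waiter
        db_instance_identifier="example-db-instance",
        waiter_delay=30,
        waiter_max_attempts=60,
        aws_conn_id="aws_default",
        region_name=None,
        response=create_response,  # handed back in the success TriggerEvent
    )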
| 3,880 | 42.122222 | 95 | py |